size
int64 0
304k
| ext
stringclasses 1
value | lang
stringclasses 1
value | branch
stringclasses 1
value | content
stringlengths 0
304k
| avg_line_length
float64 0
238
| max_line_length
int64 0
304k
|
---|---|---|---|---|---|---|
1,083 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
from odoo import api, fields, models
from odoo.addons.bus.models.bus_presence import AWAY_TIMER
from odoo.addons.bus.models.bus_presence import DISCONNECTION_TIMER
class ResUsers(models.Model):
    _inherit = "res.users"

    im_status = fields.Char('IM Status', compute='_compute_im_status')

    def _compute_im_status(self):
        """ Compute the im_status of the users.

        Derived from the user's row in ``bus_presence``:
        - last_poll older than DISCONNECTION_TIMER (or no row) -> 'offline'
        - last_presence older than AWAY_TIMER                  -> 'away'
        - otherwise                                            -> 'online'
        """
        res = {}
        # Guard: an empty recordset would produce invalid SQL ("IN ()")
        if self.ids:
            self.env.cr.execute("""
                SELECT
                    user_id as id,
                    CASE WHEN age(now() AT TIME ZONE 'UTC', last_poll) > interval %s THEN 'offline'
                         WHEN age(now() AT TIME ZONE 'UTC', last_presence) > interval %s THEN 'away'
                         ELSE 'online'
                    END as status
                FROM bus_presence
                WHERE user_id IN %s
            """, ("%s seconds" % DISCONNECTION_TIMER, "%s seconds" % AWAY_TIMER, tuple(self.ids)))
            res = {status['id']: status['status'] for status in self.env.cr.dictfetchall()}
        for user in self:
            # users without a bus_presence row are reported offline
            user.im_status = res.get(user.id, 'offline')
| 38.678571 | 1,083 |
1,315 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
from odoo import api, fields, models
from odoo.addons.bus.models.bus_presence import AWAY_TIMER
from odoo.addons.bus.models.bus_presence import DISCONNECTION_TIMER
class ResPartner(models.Model):
    _inherit = 'res.partner'

    im_status = fields.Char('IM Status', compute='_compute_im_status')

    def _compute_im_status(self):
        """ Compute the im_status of the partners from the presence of their
        active users (best status among all users of the partner, via
        ``max(last_poll)`` / ``max(last_presence)``). Partners without any
        user get the special 'im_partner' status. """
        res = {}
        # Guard: an empty recordset would produce invalid SQL ("IN ()")
        if self.ids:
            self.env.cr.execute("""
                SELECT
                    U.partner_id as id,
                    CASE WHEN max(B.last_poll) IS NULL THEN 'offline'
                         WHEN age(now() AT TIME ZONE 'UTC', max(B.last_poll)) > interval %s THEN 'offline'
                         WHEN age(now() AT TIME ZONE 'UTC', max(B.last_presence)) > interval %s THEN 'away'
                         ELSE 'online'
                    END as status
                FROM bus_presence B
                RIGHT JOIN res_users U ON B.user_id = U.id
                WHERE U.partner_id IN %s AND U.active = 't'
                GROUP BY U.partner_id
            """, ("%s seconds" % DISCONNECTION_TIMER, "%s seconds" % AWAY_TIMER, tuple(self.ids)))
            res = {status['id']: status['status'] for status in self.env.cr.dictfetchall()}
        for partner in self:
            partner.im_status = res.get(partner.id, 'im_partner')  # if not found, it is a partner, useful to avoid to refresh status in js
| 45.344828 | 1,315 |
1,945 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
from odoo import exceptions, _
from odoo.http import Controller, request, route
from odoo.addons.bus.models.bus import dispatch
class BusController(Controller):
    """Longpolling HTTP endpoints for the bus (notifications / IM presence)."""

    # override to add channels
    def _poll(self, dbname, channels, last, options):
        """Dispatch a poll request to the bus.

        Adds the global 'broadcast' channel, refreshes the calling user's
        presence when the client sent its inactivity period, then blocks in
        ``dispatch.poll`` until notifications arrive.
        """
        channels = list(channels)  # do not alter original list
        channels.append('broadcast')
        # update the user presence
        if request.session.uid and 'bus_inactivity' in options:
            request.env['bus.presence'].update(inactivity_period=options.get('bus_inactivity'), identity_field='user_id', identity_value=request.session.uid)
        # Release the request cursor before blocking in dispatch.poll(),
        # so the longpolling wait does not hold a DB connection open.
        request.cr.close()
        request._cr = None
        return dispatch.poll(dbname, channels, last, options)

    @route('/longpolling/poll', type="json", auth="public", cors="*")
    def poll(self, channels, last, options=None):
        """JSON endpoint polled by the browser for bus notifications."""
        if options is None:
            options = {}
        if not dispatch:
            raise Exception("bus.Bus unavailable")
        # only plain string channel names are accepted from the client
        if [c for c in channels if not isinstance(c, str)]:
            raise Exception("bus.Bus only string channels are allowed.")
        if request.registry.in_test_mode():
            raise exceptions.UserError(_("bus.Bus not available in test mode"))
        return self._poll(request.db, channels, last, options)

    @route('/longpolling/im_status', type="json", auth="user")
    def im_status(self, partner_ids):
        # active_test=False: also report the im_status of archived partners
        return request.env['res.partner'].with_context(active_test=False).search([('id', 'in', partner_ids)]).read(['im_status'])

    @route('/longpolling/health', type='http', auth='none', save_session=False)
    def health(self):
        """Unauthenticated liveness probe returning a static JSON payload."""
        data = json.dumps({
            'status': 'pass',
        })
        # no-store: a health probe must never be served from cache
        headers = [('Content-Type', 'application/json'),
                   ('Cache-Control', 'no-store')]
        return request.make_response(data, headers)
| 42.282609 | 1,945 |
803 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
{
    'name': 'Questions on Events',
    'description': 'Questions on Events',
    'category': 'Marketing',
    'version': '1.2',
    # extends the public website event registration flow
    'depends': ['website_event'],
    # backend views, website templates and access rules
    'data': [
        'views/event_views.xml',
        'views/event_registration_answer_views.xml',
        'views/event_registration_views.xml',
        'views/event_question_views.xml',
        'views/event_templates.xml',
        'security/security.xml',
        'security/ir.model.access.csv',
    ],
    'demo': [
        'data/event_question_demo.xml',
        'data/event_demo.xml',
        'data/event_registration_demo.xml',
    ],
    'installable': True,
    'assets': {
        # tour definitions used by the HttpCase tests
        'web.assets_tests': [
            'website_event_questions/static/tests/**/*',
        ],
    },
    'license': 'LGPL-3',
}
| 26.766667 | 803 |
3,945 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from dateutil.relativedelta import relativedelta
from odoo import fields, tests
@tests.tagged('post_install', '-at_install')
class TestUi(tests.HttpCase):

    def test_01_tickets_questions(self):
        """ Will execute the tour that fills up two tickets with a few questions answers
        and then assert that the answers are correctly saved for each attendee. """
        # event with two open tickets and three questions:
        # two 'simple_choice' (one of them once_per_order) and one 'text_box'
        self.design_fair_event = self.env['event.event'].create({
            'name': 'Design Fair New York',
            'date_begin': fields.Datetime.now() - relativedelta(days=15),
            'date_end': fields.Datetime.now() + relativedelta(days=15),
            'event_ticket_ids': [(0, 0, {
                'name': 'Free',
                'start_sale_datetime': fields.Datetime.now() - relativedelta(days=15)
            }), (0, 0, {
                'name': 'Other',
                'start_sale_datetime': fields.Datetime.now() - relativedelta(days=15)
            })],
            'website_published': True,
            'question_ids': [(0, 0, {
                'title': 'Meal Type',
                'question_type': 'simple_choice',
                'answer_ids': [
                    (0, 0, {'name': 'Mixed'}),
                    (0, 0, {'name': 'Vegetarian'}),
                    (0, 0, {'name': 'Pastafarian'})
                ]
            }), (0, 0, {
                'title': 'Allergies',
                'question_type': 'text_box'
            }), (0, 0, {
                'title': 'How did you learn about this event?',
                'question_type': 'simple_choice',
                'once_per_order': True,
                'answer_ids': [
                    (0, 0, {'name': 'Our website'}),
                    (0, 0, {'name': 'Commercials'}),
                    (0, 0, {'name': 'A friend'})
                ]
            })]
        })

        self.start_tour("/", 'test_tickets_questions', login="portal")

        registrations = self.env['event.registration'].search([
            ('email', 'in', ['[email protected]', '[email protected]'])
        ])
        self.assertEqual(len(registrations), 2)
        first_registration = registrations.filtered(lambda reg: reg.email == '[email protected]')
        second_registration = registrations.filtered(lambda reg: reg.email == '[email protected]')
        self.assertEqual(first_registration.name, 'Attendee A')
        self.assertEqual(first_registration.phone, '+32499123456')
        self.assertEqual(second_registration.name, 'Attendee B')

        event_questions = registrations.mapped('event_id.question_ids')
        self.assertEqual(len(event_questions), 3)

        # first attendee answered all three questions
        first_registration_answers = first_registration.registration_answer_ids
        self.assertEqual(len(first_registration_answers), 3)
        self.assertEqual(first_registration_answers.filtered(
            lambda answer: answer.question_id.title == 'Meal Type'
        ).value_answer_id.name, 'Vegetarian')
        self.assertEqual(first_registration_answers.filtered(
            lambda answer: answer.question_id.title == 'Allergies'
        ).value_text_box, 'Fish and Nuts')
        self.assertEqual(first_registration_answers.filtered(
            lambda answer: answer.question_id.title == 'How did you learn about this event?'
        ).value_answer_id.name, 'A friend')

        # second attendee: meal choice + the propagated once_per_order answer
        second_registration_answers = second_registration.registration_answer_ids
        self.assertEqual(len(second_registration_answers), 2)
        self.assertEqual(second_registration_answers.filtered(
            lambda answer: answer.question_id.title == 'Meal Type'
        ).value_answer_id.name, 'Pastafarian')
        # Fix: this assertion previously re-checked first_registration_answers,
        # duplicating the check above; it is meant to cover the second attendee.
        self.assertEqual(second_registration_answers.filtered(
            lambda answer: answer.question_id.title == 'How did you learn about this event?'
        ).value_answer_id.name, 'A friend')
| 43.833333 | 3,945 |
1,329 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.event.tests.common import TestEventCommon
class TestEventQuestionCommon(TestEventCommon):
    """Common fixtures: three questions attached to the complex event type."""

    @classmethod
    def setUpClass(cls):
        super(TestEventQuestionCommon, cls).setUpClass()
        # per-attendee selection question with two suggested answers
        cls.event_question_1 = cls.env['event.question'].create({
            'title': 'Question1',
            'question_type': 'simple_choice',
            'event_type_id': cls.event_type_complex.id,
            'once_per_order': False,
            'answer_ids': [
                (0, 0, {'name': 'Q1-Answer1'}),
                (0, 0, {'name': 'Q1-Answer2'})
            ],
        })
        # order-level selection question (asked once per registration order)
        cls.event_question_2 = cls.env['event.question'].create({
            'title': 'Question2',
            'question_type': 'simple_choice',
            'event_type_id': cls.event_type_complex.id,
            'once_per_order': True,
            'answer_ids': [
                (0, 0, {'name': 'Q2-Answer1'}),
                (0, 0, {'name': 'Q2-Answer2'})
            ],
        })
        # order-level free-text question
        cls.event_question_3 = cls.env['event.question'].create({
            'title': 'Question3',
            'question_type': 'text_box',
            'event_type_id': cls.event_type_complex.id,
            'once_per_order': True,
        })
| 34.973684 | 1,329 |
4,136 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
from odoo.fields import Datetime as FieldsDatetime
from odoo.tests.common import users
from odoo.addons.website.tools import MockRequest
from odoo.addons.website_event_questions.controllers.main import WebsiteEvent
from odoo.addons.website_event_questions.tests.common import TestEventQuestionCommon
class TestEventData(TestEventQuestionCommon):

    @users('user_eventmanager')
    def test_event_type_configuration_from_type(self):
        """Creating an event from the complex event type must copy its
        three questions (and their suggested answers) onto the event."""
        event_type = self.event_type_complex.with_user(self.env.user)
        event = self.env['event.event'].create({
            'name': 'Event Update Type',
            'event_type_id': event_type.id,
            'date_begin': FieldsDatetime.to_string(datetime.today() + timedelta(days=1)),
            'date_end': FieldsDatetime.to_string(datetime.today() + timedelta(days=15)),
        })
        self.assertEqual(
            event.question_ids.mapped('question_type'),
            ['simple_choice', 'simple_choice', 'text_box'])
        # specific = per-attendee questions; general = once_per_order questions
        self.assertEqual(event.specific_question_ids.title, 'Question1')
        self.assertEqual(
            set(event.specific_question_ids.mapped('answer_ids.name')),
            set(['Q1-Answer1', 'Q1-Answer2']))
        self.assertEqual(len(event.general_question_ids), 2)
        self.assertEqual(event.general_question_ids[0].title, 'Question2')
        self.assertEqual(event.general_question_ids[1].title, 'Question3')
        self.assertEqual(
            set(event.general_question_ids[0].mapped('answer_ids.name')),
            set(['Q2-Answer1', 'Q2-Answer2']))

    def test_process_attendees_form(self):
        """The website controller must split posted form values into one
        registration dict per attendee, with order-level ('0'-indexed)
        answers propagated to every attendee."""
        event = self.env['event.event'].create({
            'name': 'Event Update Type',
            'event_type_id': self.event_type_complex.with_user(self.env.user).id,
            'date_begin': FieldsDatetime.to_string(datetime.today() + timedelta(days=1)),
            'date_end': FieldsDatetime.to_string(datetime.today() + timedelta(days=15)),
        })
        ticket_id_1 = self.env['event.event.ticket'].create([{
            'name': 'Regular',
            'event_id': event.id,
            'seats_max': 200,
        }])
        ticket_id_2 = self.env['event.event.ticket'].create([{
            'name': 'VIP',
            'event_id': event.id,
            'seats_max': 200,
        }])
        # keys are '<attendee_index>-<field>' and
        # 'question_answer-<attendee_index>-<question_id>' (index 0 = order-level)
        form_details = {
            '1-name': 'Pixis',
            '1-email': '[email protected]',
            '1-phone': '+32444444444',
            '1-event_ticket_id': ticket_id_1.id,
            '2-name': 'Geluchat',
            '2-email': '[email protected]',
            '2-phone': '+32777777777',
            '2-event_ticket_id': ticket_id_2.id,
            'question_answer-1-%s' % self.event_question_1.id: '5',
            'question_answer-2-%s' % self.event_question_1.id: '9',
            'question_answer-0-%s' % self.event_question_2.id: '7',
            'question_answer-0-%s' % self.event_question_3.id: 'Free Text',
        }
        with MockRequest(self.env):
            registrations = WebsiteEvent()._process_attendees_form(event, form_details)
        # the '0'-indexed answers (questions 2 and 3) appear on both attendees
        self.assertEqual(registrations, [
            {'name': 'Pixis', 'email': '[email protected]', 'phone': '+32444444444', 'event_ticket_id': ticket_id_1.id,
             'registration_answer_ids': [
                 (0, 0, {'question_id': self.event_question_1.id, 'value_answer_id': 5}),
                 (0, 0, {'question_id': self.event_question_2.id, 'value_answer_id': 7}),
                 (0, 0, {'question_id': self.event_question_3.id, 'value_text_box': 'Free Text'})]},
            {'name': 'Geluchat', 'email': '[email protected]', 'phone': '+32777777777', 'event_ticket_id': ticket_id_2.id,
             'registration_answer_ids': [
                 (0, 0, {'question_id': self.event_question_1.id, 'value_answer_id': 9}),
                 (0, 0, {'question_id': self.event_question_2.id, 'value_answer_id': 7}),
                 (0, 0, {'question_id': self.event_question_3.id, 'value_text_box': 'Free Text'})]}
        ])
| 47.54023 | 4,136 |
3,437 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import UserError
class EventQuestion(models.Model):
    """A question asked to attendees (or once per order) when registering."""
    _name = 'event.question'
    _rec_name = 'title'
    _order = 'sequence,id'
    _description = 'Event Question'

    title = fields.Char(required=True, translate=True)
    question_type = fields.Selection([
        ('simple_choice', 'Selection'),
        ('text_box', 'Text Input')], default='simple_choice', string="Question Type", required=True)
    event_type_id = fields.Many2one('event.type', 'Event Type', ondelete='cascade')
    event_id = fields.Many2one('event.event', 'Event', ondelete='cascade')
    answer_ids = fields.One2many('event.question.answer', 'question_id', "Answers", copy=True)
    sequence = fields.Integer(default=10)
    once_per_order = fields.Boolean('Ask only once per order',
        help="If True, this question will be asked only once and its value will be propagated to every attendees."
             "If not it will be asked for every attendee of a reservation.")

    @api.constrains('event_type_id', 'event_id')
    def _constrains_event(self):
        # a question is owned either by an event type or by an event, never both
        conflicting = self.filtered(lambda question: question.event_type_id and question.event_id)
        if conflicting:
            raise UserError(_('Question cannot belong to both the event category and itself.'))

    def write(self, vals):
        """ We add a check to prevent changing the question_type of a question that already has answers.
        Indeed, it would mess up the event.registration.answer (answer type not matching the question type). """
        if 'question_type' in vals:
            changing_questions = self.filtered(
                lambda question: question.question_type != vals['question_type'])
            has_answers = changing_questions and self.env['event.registration.answer'].search_count(
                [('question_id', 'in', changing_questions.ids)]) > 0
            if has_answers:
                raise UserError(_("You cannot change the question type of a question that already has answers!"))
        return super(EventQuestion, self).write(vals)

    def action_view_question_answers(self):
        """ Allow analyzing the attendees answers to event questions in a convenient way:
        - A graph view showing counts of each suggestions for simple_choice questions
          (Along with secondary pivot and tree views)
        - A tree view showing textual answers values for text_box questions. """
        self.ensure_one()
        action = self.env["ir.actions.actions"]._for_xml_id("website_event_questions.action_event_registration_report")
        action['domain'] = [('question_id', '=', self.id)]
        views_by_type = {
            'simple_choice': [(False, 'graph'), (False, 'pivot'), (False, 'tree')],
            'text_box': [(False, 'tree')],
        }
        if self.question_type in views_by_type:
            action['views'] = views_by_type[self.question_type]
        return action
class EventQuestionAnswer(models.Model):
    """ Contains suggested answers to a 'simple_choice' event.question. """
    _name = 'event.question.answer'
    _order = 'sequence,id'
    _description = 'Event Question Answer'

    # label displayed to the attendee when picking an answer
    name = fields.Char('Answer', required=True, translate=True)
    question_id = fields.Many2one('event.question', required=True, ondelete='cascade')
    sequence = fields.Integer(default=10)
| 52.876923 | 3,437 |
1,382 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class EventRegistration(models.Model):
    """ Store answers on attendees. """
    _inherit = 'event.registration'

    # one record per (question, registration) pair answered by the attendee
    registration_answer_ids = fields.One2many('event.registration.answer', 'registration_id', string='Attendee Answers')
class EventRegistrationAnswer(models.Model):
    """ Represents the user input answer for a single event.question """
    _name = 'event.registration.answer'
    _description = 'Event Registration Answer'

    # restrict: answered questions cannot be deleted from under their answers
    question_id = fields.Many2one(
        'event.question', ondelete='restrict', required=True,
        domain="[('event_id', '=', event_id)]")
    registration_id = fields.Many2one('event.registration', required=True, ondelete='cascade')
    partner_id = fields.Many2one('res.partner', related='registration_id.partner_id')
    event_id = fields.Many2one('event.event', related='registration_id.event_id')
    question_type = fields.Selection(related='question_id.question_type')
    # exactly one of the two value fields is used, depending on question_type
    value_answer_id = fields.Many2one('event.question.answer', string="Suggested answer")
    value_text_box = fields.Text('Text answer')

    _sql_constraints = [
        ('value_check', "CHECK(value_answer_id IS NOT NULL OR COALESCE(value_text_box, '') <> '')", "There must be a suggested value or a text value.")
    ]
| 46.066667 | 1,382 |
2,886 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class EventType(models.Model):
    _inherit = 'event.type'

    # template questions copied onto events created with this type
    question_ids = fields.One2many(
        'event.question', 'event_type_id',
        string='Questions', copy=True)
class EventEvent(models.Model):
    """ Override Event model to add optional questions when buying tickets. """
    _inherit = 'event.event'

    question_ids = fields.One2many(
        'event.question', 'event_id', 'Questions', copy=True,
        compute='_compute_question_ids', readonly=False, store=True)
    # views of question_ids split by once_per_order
    general_question_ids = fields.One2many('event.question', 'event_id', 'General Questions',
                                           domain=[('once_per_order', '=', True)])
    specific_question_ids = fields.One2many('event.question', 'event_id', 'Specific Questions',
                                            domain=[('once_per_order', '=', False)])

    @api.depends('event_type_id')
    def _compute_question_ids(self):
        """ Update event questions from its event type. Depends are set only on
        event_type_id itself to emulate an onchange. Changing event type content
        itself should not trigger this method.

        When synchronizing questions:

          * lines that no answer are removed;
          * type lines are added;
        """
        if self._origin.question_ids:
            # lines to keep: those with already given answers
            questions_tokeep_ids = self.env['event.registration.answer'].search(
                [('question_id', 'in', self._origin.question_ids.ids)]
            ).question_id.ids
        else:
            questions_tokeep_ids = []
        for event in self:
            if not event.event_type_id and not event.question_ids:
                event.question_ids = False
                continue

            if questions_tokeep_ids:
                # (3, id): unlink only the questions without answers
                questions_toremove = event._origin.question_ids.filtered(lambda question: question.id not in questions_tokeep_ids)
                command = [(3, question.id) for question in questions_toremove]
            else:
                # (5, 0): clear all existing questions
                command = [(5, 0)]
            if event.event_type_id.question_ids:
                # copy the type's questions (and their answers) as new records
                command += [
                    (0, 0, {
                        'title': question.title,
                        'question_type': question.question_type,
                        'sequence': question.sequence,
                        'once_per_order': question.once_per_order,
                        'answer_ids': [(0, 0, {
                            'name': answer.name,
                            'sequence': answer.sequence
                        }) for answer in question.answer_ids],
                    }) for question in event.event_type_id.question_ids
                ]
            event.question_ids = command
| 42.441176 | 2,886 |
2,117 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.http import request
from odoo.addons.website_event.controllers.main import WebsiteEventController
class WebsiteEvent(WebsiteEventController):

    def _process_attendees_form(self, event, form_details):
        """ Process data posted from the attendee details form.

        Extracts question answers:
        - For both questions asked 'once_per_order' and questions asked to every attendee
        - For questions of type 'simple_choice', extracting the suggested answer id
        - For questions of type 'text_box', extracting the text answer of the attendee. """
        registrations = super(WebsiteEvent, self)._process_attendees_form(event, form_details)
        for registration in registrations:
            registration['registration_answer_ids'] = []

        general_answer_ids = []
        for key, value in form_details.items():
            # keys look like 'question_answer-<attendee_index>-<question_id>'
            if 'question_answer' not in key or not value:
                continue
            dummy, registration_index, question_id = key.split('-')
            question_sudo = request.env['event.question'].browse(int(question_id))
            if question_sudo.question_type == 'simple_choice':
                answer_values = {
                    'question_id': int(question_id),
                    'value_answer_id': int(value)
                }
            elif question_sudo.question_type == 'text_box':
                answer_values = {
                    'question_id': int(question_id),
                    'value_text_box': value
                }
            else:
                answer_values = None
            if not answer_values:
                continue
            # index 0 means 'once_per_order': collected and propagated below
            if int(registration_index):
                registrations[int(registration_index) - 1]['registration_answer_ids'].append((0, 0, answer_values))
            else:
                general_answer_ids.append((0, 0, answer_values))

        for registration in registrations:
            registration['registration_answer_ids'].extend(general_answer_ids)

        return registrations
| 45.042553 | 2,117 |
528 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
    'name': 'Send SMS to Visitor with leads',
    'category': 'Website/Website',
    'sequence': 54,
    'summary': 'Allows to send sms to website visitor that have lead',
    'version': '1.0',
    'description': """Allows to send sms to website visitor if the visitor is linked to a lead.""",
    'depends': ['website_sms', 'crm'],
    'data': [],
    'installable': True,
    # bridge module: installed automatically when both dependencies are present
    'auto_install': True,
    'license': 'LGPL-3',
}
| 35.2 | 528 |
1,216 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
class WebsiteVisitor(models.Model):
    _inherit = 'website.visitor'

    def _check_for_sms_composer(self):
        """Return True when an SMS composer can target this visitor.

        On top of the base checks, a visitor qualifies when one of its leads
        carries the visitor's mobile number (in ``mobile`` or ``phone``).
        """
        check = super(WebsiteVisitor, self)._check_for_sms_composer()
        if not check and self.lead_ids:
            # Only existence matters here: the previous code sorted the leads
            # by confidence level before a simple truthiness test, which was
            # unnecessary work — the boolean result is identical.
            matching_leads = self.lead_ids.filtered(
                lambda l: l.mobile == self.mobile or l.phone == self.mobile)
            if matching_leads:
                return True
        return check

    def _prepare_sms_composer_context(self):
        """Target the highest-confidence lead matching the visitor's mobile
        number when the visitor is not linked to a partner."""
        if not self.partner_id and self.lead_ids:
            leads_with_number = self.lead_ids.filtered(
                lambda l: l.mobile == self.mobile or l.phone == self.mobile
            )._sort_by_confidence_level(reverse=True)
            if leads_with_number:
                lead = leads_with_number[0]
                return {
                    'default_res_model': 'crm.lead',
                    'default_res_id': lead.id,
                    # pick whichever lead field actually holds the visitor's number
                    'number_field_name': 'mobile' if lead.mobile == self.mobile else 'phone',
                }
        return super(WebsiteVisitor, self)._prepare_sms_composer_context()
| 43.428571 | 1,216 |
1,852 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2019 - Blanco Martín & Asociados. https://www.bmya.cl
{
    'name': 'Chile - Accounting',
    'version': "3.0",
    'description': """
Chilean accounting chart and tax localization.
Plan contable chileno e impuestos de acuerdo a disposiciones vigentes
""",
    'author': 'Blanco Martín & Asociados',
    'website': 'https://www.odoo.com/documentation/15.0/applications/finance/accounting/fiscal_localizations/localizations/chile.html',
    'category': 'Accounting/Localizations/Account Charts',
    'depends': [
        'contacts',
        'base_address_city',
        'base_vat',
        'l10n_latam_base',
        'l10n_latam_invoice_document',
        'uom',
    ],
    # views first, then chart-of-accounts / tax / document-type data
    # (file order matters: records reference earlier files)
    'data': [
        'views/account_move_view.xml',
        'views/account_tax_view.xml',
        'views/res_bank_view.xml',
        'views/res_country_view.xml',
        'views/report_invoice.xml',
        'views/res_partner.xml',
        'views/res_config_settings_view.xml',
        'data/l10n_cl_chart_data.xml',
        'data/account_tax_report_data.xml',
        'data/account_tax_group_data.xml',
        'data/account_tax_tags_data.xml',
        'data/account_tax_data.xml',
        'data/l10n_latam_identification_type_data.xml',
        'data/l10n_latam.document.type.csv',
        'data/menuitem_data.xml',
        'data/product_data.xml',
        'data/uom_data.xml',
        'data/res.currency.csv',
        'data/res_currency_data.xml',
        'data/res.bank.csv',
        'data/res.country.csv',
        'data/res_partner.xml',
        'data/account_fiscal_template.xml',
        'data/account_chart_template_data.xml',
    ],
    'demo': [
        'demo/demo_company.xml',
        'demo/partner_demo.xml',
    ],
    'license': 'LGPL-3',
}
| 34.90566 | 1,850 |
1,520 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
import logging
from odoo import api, models
_logger = logging.getLogger(__name__)
class AccountChartTemplate(models.Model):
    _inherit = "account.chart.template"

    @api.model
    def _get_demo_data(self):
        """Yield Chilean-specific demo data on top of the base demo data."""
        # flag two demo partners as foreign taxpayers (SII type '4')
        yield ('res.partner', {
            'base.res_partner_12': {
                'l10n_cl_sii_taxpayer_type': '4',
            },
            'base.res_partner_2': {
                'l10n_cl_sii_taxpayer_type': '4',
            },
        })
        # activate the foreign invoice document type used below
        yield ('l10n_latam.document.type', {
            'l10n_cl.dc_fe_dte': {'active': True},
        })
        for model, data in super()._get_demo_data():
            yield model, data

    @api.model
    def _get_demo_data_move(self):
        """Adapt the base demo moves for Chilean companies: disable latam
        documents on purchase journals and set the foreign document type
        on the demo vendor bills."""
        ref = self.env.ref
        cid = self.env.company.id
        model, data = super()._get_demo_data_move()
        if self.env.company.account_fiscal_country_id.code == "CL":
            foreign = ref('l10n_cl.dc_fe_dte').id
            self.env['account.journal'].search([
                ('type', '=', 'purchase'),
                ('company_id', '=', self.env.company.id),
            ]).l10n_latam_use_documents = False
            data[f'{cid}_demo_invoice_1']['l10n_latam_document_type_id'] = foreign
            data[f'{cid}_demo_invoice_2']['l10n_latam_document_type_id'] = foreign
            data[f'{cid}_demo_invoice_3']['l10n_latam_document_type_id'] = foreign
            data[f'{cid}_demo_invoice_followup']['l10n_latam_document_type_id'] = foreign
        return model, data
| 35.348837 | 1,520 |
830 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class AccountTax(models.Model):
    _name = 'account.tax'
    _inherit = 'account.tax'

    # Chilean SII tax code; group_operator=False: summing codes is meaningless
    l10n_cl_sii_code = fields.Integer('SII Code', group_operator=False)
class AccountTaxTemplate(models.Model):
    _name = 'account.tax.template'
    _inherit = 'account.tax.template'

    l10n_cl_sii_code = fields.Integer('SII Code')

    def _get_tax_vals(self, company, tax_template_to_tax):
        """Propagate the Chilean SII code (and the tax group, when one is set)
        onto the values used to instantiate the tax from this template."""
        self.ensure_one()
        vals = super(AccountTaxTemplate, self)._get_tax_vals(company, tax_template_to_tax)
        vals['l10n_cl_sii_code'] = self.l10n_cl_sii_code
        if self.tax_group_id:
            vals['tax_group_id'] = self.tax_group_id.id
        return vals
| 30.740741 | 830 |
9,451 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.exceptions import ValidationError
from odoo import models, fields, api, _
from odoo.osv import expression
SII_VAT = '60805000-0'
class AccountMove(models.Model):
    _inherit = "account.move"

    partner_id_vat = fields.Char(related='partner_id.vat', string='VAT No')
    l10n_latam_internal_type = fields.Selection(
        related='l10n_latam_document_type_id.internal_type', string='L10n Latam Internal Type')

    def _get_l10n_latam_documents_domain(self):
        """Return the domain restricting selectable latam document types for
        Chilean journals, based on journal type, move type and the partner's
        SII taxpayer type. Delegates to super() outside the CL localization."""
        self.ensure_one()
        if self.journal_id.company_id.account_fiscal_country_id != self.env.ref('base.cl') or not \
                self.journal_id.l10n_latam_use_documents:
            return super()._get_l10n_latam_documents_domain()
        if self.journal_id.type == 'sale':
            domain = [('country_id.code', '=', 'CL')]
            if self.move_type in ['in_invoice', 'out_invoice']:
                domain += [('internal_type', 'in', ['invoice', 'debit_note', 'invoice_in'])]
            elif self.move_type in ['in_refund', 'out_refund']:
                domain += [('internal_type', '=', 'credit_note')]
            if self.company_id.partner_id.l10n_cl_sii_taxpayer_type == '1':
                domain += [('code', '!=', '71')]  # Companies with VAT Affected doesn't have "Boleta de honorarios Electrónica"
            return domain
        if self.move_type == 'in_refund':
            internal_types_domain = ('internal_type', '=', 'credit_note')
        else:
            internal_types_domain = ('internal_type', 'in', ['invoice', 'debit_note', 'invoice_in'])
        domain = [
            ('country_id.code', '=', 'CL'),
            internal_types_domain,
        ]
        # Consistency fix: use the SII_VAT module constant instead of repeating
        # the '60805000-0' literal (it is already used in _check_document_types_post).
        if self.partner_id.l10n_cl_sii_taxpayer_type == '1' and self.partner_id_vat != SII_VAT:
            domain += [('code', 'not in', ['39', '70', '71', '914', '911'])]
        elif self.partner_id.l10n_cl_sii_taxpayer_type == '1' and self.partner_id_vat == SII_VAT:
            domain += [('code', 'not in', ['39', '70', '71'])]
        elif self.partner_id.l10n_cl_sii_taxpayer_type == '2':
            domain += [('code', 'in', ['70', '71', '56', '61'])]
        elif self.partner_id.l10n_cl_sii_taxpayer_type == '3':
            domain += [('code', 'in', ['35', '38', '39', '41', '56', '61'])]
        elif self.partner_id.country_id.code != 'CL' or self.partner_id.l10n_cl_sii_taxpayer_type == '4':
            domain += [('code', '=', '46')]
        else:
            domain += [('code', 'in', [])]
        return domain

    def _check_document_types_post(self):
        """Validate the partner / document-type combination of Chilean sale
        and purchase moves; raise ValidationError on invalid combinations."""
        for rec in self.filtered(
                lambda r: r.company_id.account_fiscal_country_id.code == "CL" and
                r.journal_id.type in ['sale', 'purchase']):
            tax_payer_type = rec.partner_id.l10n_cl_sii_taxpayer_type
            vat = rec.partner_id.vat
            country_id = rec.partner_id.country_id
            latam_document_type_code = rec.l10n_latam_document_type_id.code
            if (not tax_payer_type or not vat) and (country_id.code == "CL" and latam_document_type_code
                    and latam_document_type_code not in ['35', '38', '39', '41']):
                raise ValidationError(_('Tax payer type and vat number are mandatory for this type of '
                                        'document. Please set the current tax payer type of this customer'))
            if rec.journal_id.type == 'sale' and rec.journal_id.l10n_latam_use_documents:
                if country_id.code != "CL":
                    if not ((tax_payer_type == '4' and latam_document_type_code in ['110', '111', '112']) or (
                            tax_payer_type == '3' and latam_document_type_code in ['39', '41', '61', '56'])):
                        raise ValidationError(_(
                            'Document types for foreign customers must be export type (codes 110, 111 or 112) or you \
should define the customer as an end consumer and use receipts (codes 39 or 41)'))
            if rec.journal_id.type == 'purchase' and rec.journal_id.l10n_latam_use_documents:
                if vat != SII_VAT and latam_document_type_code == '914':
                    raise ValidationError(_('The DIN document is intended to be used only with RUT 60805000-0'
                                            ' (Tesorería General de La República)'))
                if not tax_payer_type or not vat:
                    if country_id.code == "CL" and latam_document_type_code not in [
                            '35', '38', '39', '41']:
                        raise ValidationError(_('Tax payer type and vat number are mandatory for this type of '
                                                'document. Please set the current tax payer type of this supplier'))
                if tax_payer_type == '2' and latam_document_type_code not in ['70', '71', '56', '61']:
                    raise ValidationError(_('The tax payer type of this supplier is incorrect for the selected type'
                                            ' of document.'))
                if tax_payer_type in ['1', '3']:
                    if latam_document_type_code in ['70', '71']:
                        raise ValidationError(_('The tax payer type of this supplier is not entitled to deliver '
                                                'fees documents'))
                    if latam_document_type_code in ['110', '111', '112']:
                        raise ValidationError(_('The tax payer type of this supplier is not entitled to deliver '
                                                'imports documents'))
                if (tax_payer_type == '4' or country_id.code != "CL") and latam_document_type_code != '46':
                    raise ValidationError(_('You need a journal without the use of documents for foreign '
                                            'suppliers'))

    @api.onchange('journal_id')
    def _l10n_cl_onchange_journal(self):
        # force re-selection of a document type consistent with the new journal
        if self.company_id.country_id.code == 'CL':
            self.l10n_latam_document_type_id = False

    def _post(self, soft=True):
        # run the Chilean document-type validations before posting
        self._check_document_types_post()
        return super()._post(soft)

    def _l10n_cl_get_formatted_sequence(self, number=0):
        """Return '<doc prefix> <number zero-padded to 6 digits>'."""
        return '%s %06d' % (self.l10n_latam_document_type_id.doc_code_prefix, number)

    def _get_starting_sequence(self):
        """ If use documents then will create a new starting sequence using the document type code prefix and the
        journal document number with a 6 padding number """
        if self.journal_id.l10n_latam_use_documents and self.company_id.account_fiscal_country_id.code == "CL":
            if self.l10n_latam_document_type_id:
                return self._l10n_cl_get_formatted_sequence()
        return super()._get_starting_sequence()

    def _get_last_sequence_domain(self, relaxed=False):
        """In Chile, sequences run per document type / company / move type
        instead of per journal: rewrite the SQL where-clause accordingly."""
        where_string, param = super(AccountMove, self)._get_last_sequence_domain(relaxed)
        if self.company_id.account_fiscal_country_id.code == "CL" and self.l10n_latam_use_documents:
            where_string = where_string.replace('journal_id = %(journal_id)s AND', '')
            where_string += ' AND l10n_latam_document_type_id = %(l10n_latam_document_type_id)s AND ' \
                            'company_id = %(company_id)s AND move_type IN %(move_type)s'
            param['company_id'] = self.company_id.id or False
            param['l10n_latam_document_type_id'] = self.l10n_latam_document_type_id.id or 0
            param['move_type'] = (('in_invoice', 'in_refund') if
                                  self.l10n_latam_document_type_id._is_doc_type_vendor() else ('out_invoice', 'out_refund'))
        return where_string, param

    def _get_name_invoice_report(self):
        """Use the Chilean invoice QWeb report for CL documents."""
        self.ensure_one()
        if self.l10n_latam_use_documents and self.company_id.account_fiscal_country_id.code == 'CL':
            return 'l10n_cl.report_invoice_document'
        return super()._get_name_invoice_report()

    def _l10n_cl_get_invoice_totals_for_report(self):
        """Totals shown on the printed report. For document types covered by
        _l10n_cl_include_sii(), tax lines with SII code 14 are filtered out
        and the untaxed amount is recomputed from the total."""
        self.ensure_one()
        tax_ids_filter = tax_line_id_filter = None
        include_sii = self._l10n_cl_include_sii()
        if include_sii:
            tax_ids_filter = (lambda aml, tax: bool(tax.l10n_cl_sii_code != 14))
            tax_line_id_filter = (lambda aml, tax: bool(tax.l10n_cl_sii_code != 14))
        tax_lines_data = self._prepare_tax_lines_data_for_totals_from_invoice(
            tax_ids_filter=tax_ids_filter, tax_line_id_filter=tax_line_id_filter)
        if include_sii:
            amount_untaxed = self.currency_id.round(
                self.amount_total - sum([x['tax_amount'] for x in tax_lines_data if 'tax_amount' in x]))
        else:
            amount_untaxed = self.amount_untaxed
        return self._get_tax_totals(self.partner_id, tax_lines_data, self.amount_total, amount_untaxed, self.currency_id)

    def _l10n_cl_include_sii(self):
        """Whether this document type gets the special SII tax handling on
        the report (boletas / export document codes)."""
        self.ensure_one()
        return self.l10n_latam_document_type_id.code in ['39', '41', '110', '111', '112', '34']

    def _is_manual_document_number(self):
        # Chilean vendor bills (non-vendor document types) are numbered manually
        if self.journal_id.company_id.country_id.code == 'CL':
            return self.journal_id.type == 'purchase' and not self.l10n_latam_document_type_id._is_doc_type_vendor()
        return super()._is_manual_document_number()
| 59.05 | 9,448 |
402 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
# NOTE(review): class is named ResPartner but it extends res.country — rename
# candidate for a follow-up (harmless at runtime, Odoo keys on _name).
class ResPartner(models.Model):
    _name = 'res.country'
    _inherit = 'res.country'
    # Country identification used by the Chilean localization when reporting
    # import/export documents (customs registry codes).
    l10n_cl_customs_code = fields.Char('Customs Code')
    l10n_cl_customs_name = fields.Char('Customs Name')
    l10n_cl_customs_abbreviation = fields.Char('Customs Abbreviation')
| 33.5 | 402 |
1,105 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields
class L10nLatamDocumentType(models.Model):
    _inherit = 'l10n_latam.document.type'
    internal_type = fields.Selection(
        selection_add=[
            ('invoice', 'Invoices'),
            ('invoice_in', 'Purchase Invoices'),
            ('debit_note', 'Debit Notes'),
            ('credit_note', 'Credit Notes'),
            ('receipt_invoice', 'Receipt Invoice')])
    def _format_document_number(self, document_number):
        """ Make validation of Import Dispatch Number
        * making validations on the document_number. If it is wrong it should raise an exception
        * format the document_number against a pattern and return it
        """
        self.ensure_one()
        if self.country_id.code == "CL":
            # Chilean document numbers are left-padded with zeros to 6 digits;
            # an empty number yields False.
            return document_number.zfill(6) if document_number else False
        return super()._format_document_number(document_number)
    def _is_doc_type_vendor(self):
        """True for the vendor document type (code 46)."""
        return self.code == '46'
| 33.484848 | 1,105 |
1,388 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
class AccountMoveLine(models.Model):
    _inherit = 'account.move.line'
    def _l10n_cl_prices_and_taxes(self):
        """Return this line's amounts as they must be printed on Chilean
        documents.

        When the move embeds the SII handling (``_l10n_cl_include_sii``),
        taxes with SII code 14 are reported tax-included; otherwise amounts
        are reported tax-excluded.

        :return: dict with keys ``price_unit``, ``price_subtotal`` and
            ``price_net`` (unit price after discount).
        """
        self.ensure_one()
        invoice = self.move_id
        included_taxes = self.tax_ids.filtered(lambda x: x.l10n_cl_sii_code == 14) if self.move_id._l10n_cl_include_sii() else self.tax_ids
        if not included_taxes:
            # Tax-excluded reporting: strip all taxes from the unit price.
            price_unit = self.tax_ids.with_context(round=False).compute_all(
                self.price_unit, invoice.currency_id, 1.0, self.product_id, invoice.partner_id)
            price_unit = price_unit['total_excluded']
            price_subtotal = self.price_subtotal
        else:
            # Tax-included reporting for the selected (SII code 14) taxes.
            price_unit = included_taxes.compute_all(
                self.price_unit, invoice.currency_id, 1.0, self.product_id, invoice.partner_id)['total_included']
            price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
            price_subtotal = included_taxes.compute_all(
                price, invoice.currency_id, self.quantity, self.product_id, invoice.partner_id)['total_included']
        price_net = price_unit * (1 - (self.discount or 0.0) / 100.0)
        return {
            'price_unit': price_unit,
            'price_subtotal': price_subtotal,
            'price_net': price_net
        }
| 44.774194 | 1,388 |
246 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class ResBank(models.Model):
    _name = 'res.bank'
    _inherit = 'res.bank'
    # Bank identifier in the SBIF registry (presumably the Chilean banking
    # supervisor's code) — confirm source registry.
    l10n_cl_sbif_code = fields.Char('Cod. SBIF', size=10)
| 27.333333 | 246 |
415 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields
class ResCompany(models.Model):
    _inherit = "res.company"
    def _localization_use_documents(self):
        """ Chilean localization use documents """
        self.ensure_one()
        if self.account_fiscal_country_id.code == "CL":
            return True
        return super()._localization_use_documents()
| 34.583333 | 415 |
316 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import _, api, fields, models
class ResCurrency(models.Model):
    _name = "res.currency"
    _inherit = "res.currency"
    # Currency identification fields used by the Chilean localization.
    l10n_cl_currency_code = fields.Char('Currency Code')
    l10n_cl_short_name = fields.Char('Short Name')
| 31.6 | 316 |
219 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models, api, _
class UomUom(models.Model):
    _inherit = 'uom.uom'
    # Unit-of-measure code as expected by the SII (Chilean tax authority)
    # documents — TODO confirm against the SII code table.
    l10n_cl_sii_code = fields.Char('SII Code')
| 24.333333 | 219 |
593 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
from odoo.http import request
class AccountChartTemplate(models.Model):
    _inherit = 'account.chart.template'
    def _load(self, sale_tax_rate, purchase_tax_rate, company):
        """ Set tax calculation rounding method required in Chilean localization"""
        res = super()._load(sale_tax_rate, purchase_tax_rate, company)
        if company.account_fiscal_country_id.code == 'CL':
            # Chilean companies are switched to global (per-document) tax
            # rounding right after their chart of accounts is installed.
            company.write({'tax_calculation_rounding_method': 'round_globally'})
        return res
| 39.533333 | 593 |
2,851 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import stdnum
from odoo import _, api, fields, models
from odoo.exceptions import UserError, ValidationError
class ResPartner(models.Model):
    _name = 'res.partner'
    _inherit = 'res.partner'
    # SII (Chilean tax authority) taxpayer categories.
    _sii_taxpayer_types = [
        ('1', _('VAT Affected (1st Category)')),
        ('2', _('Fees Receipt Issuer (2nd category)')),
        ('3', _('End Consumer')),
        ('4', _('Foreigner')),
    ]
    l10n_cl_sii_taxpayer_type = fields.Selection(
        _sii_taxpayer_types, 'Taxpayer Type', index=True,
        help='1 - VAT Affected (1st Category) (Most of the cases)\n'
             '2 - Fees Receipt Issuer (Applies to suppliers who issue fees receipt)\n'
             '3 - End consumer (only receipts)\n'
             '4 - Foreigner')
    @api.model
    def _commercial_fields(self):
        # Propagate the taxpayer type from the commercial partner to its contacts.
        return super()._commercial_fields() + ['l10n_cl_sii_taxpayer_type']
    def _format_vat_cl(self, values):
        """Return the VAT from ``values`` normalized for Chile (stdnum format,
        dots removed, 'CL' prefix stripped, uppercased), or the VAT unchanged
        when the partner is not Chilean / does not use a RUT-like
        identification type."""
        identification_types = [self.env.ref('l10n_latam_base.it_vat').id, self.env.ref('l10n_cl.it_RUT').id,
                                self.env.ref('l10n_cl.it_RUN').id]
        country = self.env["res.country"].browse(values.get('country_id'))
        identification_type = self.env['l10n_latam.identification.type'].browse(
            values.get('l10n_latam_identification_type_id')
        )
        partner_country_is_chile = country.code == "CL" or identification_type.country_id.code == "CL"
        if partner_country_is_chile and \
                values.get('l10n_latam_identification_type_id') in identification_types and values.get('vat'):
            return stdnum.util.get_cc_module('cl', 'vat').format(values['vat']).replace('.', '').replace(
                'CL', '').upper()
        else:
            return values['vat']
    def _format_dotted_vat_cl(self, vat):
        """Return ``vat`` ('NNNNNNNN-DV') with thousands separators,
        e.g. '76123456-0' -> '76.123.456-0'. Assumes the dash is present."""
        vat_l = vat.split('-')
        n_vat, n_dv = vat_l[0], vat_l[1]
        return '%s-%s' % (format(int(n_vat), ',d').replace(',', '.'), n_dv)
    @api.model
    def create(self, values):
        # Normalize the VAT on creation when one is provided.
        if values.get('vat'):
            values['vat'] = self._format_vat_cl(values)
        return super().create(values)
    def write(self, values):
        # Re-normalize the VAT when any of the fields it depends on changes.
        if any(field in values for field in ['vat', 'l10n_latam_identification_type_id', 'country_id']):
            # NOTE(review): ``values['vat']`` is overwritten on each loop
            # iteration, so a multi-record write applies the VAT computed
            # from the *last* record to all records — confirm whether
            # multi-record writes with differing VATs can reach this path.
            for record in self:
                vat_values = {
                    'vat': values.get('vat', record.vat),
                    'l10n_latam_identification_type_id': values.get(
                        'l10n_latam_identification_type_id', record.l10n_latam_identification_type_id.id),
                    'country_id': values.get('country_id', record.country_id.id)
                }
                values['vat'] = self._format_vat_cl(vat_values)
        return super().write(values)
| 43.19697 | 2,851 |
776 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Odoo module manifest for the OAuth2 authentication addon.
{
    'name': 'OAuth2 Authentication',
    'category': 'Hidden/Tools',
    'description': """
Allow users to login through OAuth2 Provider.
=============================================
""",
    'maintainer': 'Odoo S.A.',
    'depends': ['base', 'web', 'base_setup', 'auth_signup'],
    'data': [
        'data/auth_oauth_data.xml',
        'views/auth_oauth_views.xml',
        'views/res_users_views.xml',
        'views/res_config_settings_views.xml',
        'views/auth_oauth_templates.xml',
        'security/ir.model.access.csv',
    ],
    'assets': {
        # The login/signup pages are frontend pages: ship the OAuth assets there.
        'web.assets_frontend': [
            'auth_oauth/static/**/*',
        ],
    },
    'license': 'LGPL-3',
}
| 28.740741 | 776 |
1,184 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class AuthOAuthProvider(models.Model):
    """Class defining the configuration values of an OAuth2 provider"""
    _name = 'auth.oauth.provider'
    _description = 'OAuth2 provider'
    _order = 'sequence, name'
    name = fields.Char(string='Provider name', required=True)  # Name of the OAuth2 entity, Google, etc
    client_id = fields.Char(string='Client ID')  # Our identifier registered with the provider
    auth_endpoint = fields.Char(string='Authorization URL', required=True)  # OAuth provider URL to authenticate users
    scope = fields.Char(default='openid profile email')  # OAuth user data desired to access
    validation_endpoint = fields.Char(string='UserInfo URL', required=True)  # OAuth provider URL to get user information
    data_endpoint = fields.Char()  # optional extra endpoint queried for additional user data
    enabled = fields.Boolean(string='Allowed')  # only enabled providers are listed on the login page
    css_class = fields.Char(string='CSS class', default='fa fa-fw fa-sign-in text-primary')  # icon of the login button
    body = fields.Char(required=True, string="Login button label", help='Link text in Login Dialog', translate=True)
    sequence = fields.Integer(default=10)  # ordering key (see _order)
| 51.478261 | 1,184 |
540 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
class IrConfigParameter(models.Model):
    _inherit = 'ir.config_parameter'
    def init(self, force=False):
        """On a forced init, refresh the built-in Odoo OAuth provider so its
        ``client_id`` is this database's UUID.

        :param force: when True, write the current ``database.uuid`` parameter
            into the ``auth_oauth.provider_openerp`` record's ``client_id``.
        """
        super(IrConfigParameter, self).init(force=force)
        if force:
            # raise_if_not_found=False: env.ref() raises ValueError when the
            # xml record is missing, which made the falsy guard below
            # unreachable — return quietly instead if the provider is gone.
            oauth_oe = self.env.ref('auth_oauth.provider_openerp', raise_if_not_found=False)
            if not oauth_oe:
                return
            dbuuid = self.sudo().get_param('database.uuid')
            oauth_oe.write({'client_id': dbuuid})
| 31.764706 | 540 |
6,092 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
import requests
import werkzeug.http
from odoo import api, fields, models
from odoo.exceptions import AccessDenied, UserError
from odoo.addons.auth_signup.models.res_users import SignupError
from odoo.addons import base
# Keep the access token out of the fields non-privileged users may read.
base.models.res_users.USER_PRIVATE_FIELDS.append('oauth_access_token')
class ResUsers(models.Model):
    _inherit = 'res.users'
    oauth_provider_id = fields.Many2one('auth.oauth.provider', string='OAuth Provider')
    oauth_uid = fields.Char(string='OAuth User ID', help="Oauth Provider user_id", copy=False)
    oauth_access_token = fields.Char(string='OAuth Access Token', readonly=True, copy=False)
    _sql_constraints = [
        ('uniq_users_oauth_provider_oauth_uid', 'unique(oauth_provider_id, oauth_uid)', 'OAuth UID must be unique per provider'),
    ]
    def _auth_oauth_rpc(self, endpoint, access_token):
        """GET ``endpoint`` authenticated with ``access_token`` and return the
        parsed JSON body.

        The token is sent as a Bearer header when the
        ``auth_oauth.authorization_header`` config parameter is set, as an
        ``access_token`` query parameter otherwise. On a non-OK response the
        bearer error from the WWW-Authenticate challenge is returned when
        available, else ``{'error': 'invalid_request'}``.
        """
        if self.env['ir.config_parameter'].sudo().get_param('auth_oauth.authorization_header'):
            response = requests.get(endpoint, headers={'Authorization': 'Bearer %s' % access_token}, timeout=10)
        else:
            response = requests.get(endpoint, params={'access_token': access_token}, timeout=10)
        if response.ok: # nb: could be a successful failure
            return response.json()
        auth_challenge = werkzeug.http.parse_www_authenticate_header(
            response.headers.get('WWW-Authenticate'))
        if auth_challenge.type == 'bearer' and 'error' in auth_challenge:
            return dict(auth_challenge)
        return {'error': 'invalid_request'}
    @api.model
    def _auth_oauth_validate(self, provider, access_token):
        """ return the validation data corresponding to the access token """
        oauth_provider = self.env['auth.oauth.provider'].browse(provider)
        validation = self._auth_oauth_rpc(oauth_provider.validation_endpoint, access_token)
        if validation.get("error"):
            raise Exception(validation['error'])
        if oauth_provider.data_endpoint:
            data = self._auth_oauth_rpc(oauth_provider.data_endpoint, access_token)
            validation.update(data)
        # unify subject key, pop all possible and get most sensible. When this
        # is reworked, BC should be dropped and only the `sub` key should be
        # used (here, in _generate_signup_values, and in _auth_oauth_signin)
        subject = next(filter(None, [
            validation.pop(key, None)
            for key in [
                'sub', # standard
                'id', # google v1 userinfo, facebook opengraph
                'user_id', # google tokeninfo, odoo (tokeninfo)
            ]
        ]), None)
        if not subject:
            raise AccessDenied('Missing subject identity')
        validation['user_id'] = subject
        return validation
    @api.model
    def _generate_signup_values(self, provider, validation, params):
        """Build the signup values for a user created from OAuth data.

        When the provider returns no email, a synthetic login of the form
        ``provider_<id>_user_<uid>`` is used instead.
        """
        oauth_uid = validation['user_id']
        email = validation.get('email', 'provider_%s_user_%s' % (provider, oauth_uid))
        name = validation.get('name', email)
        return {
            'name': name,
            'login': email,
            'email': email,
            'oauth_provider_id': provider,
            'oauth_uid': oauth_uid,
            'oauth_access_token': params['access_token'],
            'active': True,
        }
    @api.model
    def _auth_oauth_signin(self, provider, validation, params):
        """ retrieve and sign in the user corresponding to provider and validated access token
        :param provider: oauth provider id (int)
        :param validation: result of validation of access token (dict)
        :param params: oauth parameters (dict)
        :return: user login (str)
        :raise: AccessDenied if signin failed
        This method can be overridden to add alternative signin methods.
        """
        oauth_uid = validation['user_id']
        try:
            oauth_user = self.search([("oauth_uid", "=", oauth_uid), ('oauth_provider_id', '=', provider)])
            if not oauth_user:
                raise AccessDenied()
            assert len(oauth_user) == 1
            oauth_user.write({'oauth_access_token': params['access_token']})
            return oauth_user.login
        except AccessDenied as access_denied_exception:
            if self.env.context.get('no_user_creation'):
                return None
            # No matching user: attempt a signup using the (optional) invite
            # token carried in the OAuth state.
            state = json.loads(params['state'])
            token = state.get('t')
            values = self._generate_signup_values(provider, validation, params)
            try:
                _, login, _ = self.signup(values, token)
                return login
            except (SignupError, UserError):
                raise access_denied_exception
    @api.model
    def auth_oauth(self, provider, params):
        """Validate the access token and sign the matching user in.

        :return: ``(dbname, login, access_token)`` credentials triple.
        :raise: AccessDenied when no user could be signed in.
        """
        # Advice by Google (to avoid Confused Deputy Problem)
        # if validation.audience != OUR_CLIENT_ID:
        #   abort()
        # else:
        #   continue with the process
        access_token = params.get('access_token')
        validation = self._auth_oauth_validate(provider, access_token)
        # retrieve and sign in user
        login = self._auth_oauth_signin(provider, validation, params)
        if not login:
            raise AccessDenied()
        # return user credentials
        return (self.env.cr.dbname, login, access_token)
    def _check_credentials(self, password, env):
        # Fall back to accepting the stored OAuth access token as a
        # password-equivalent credential when regular validation fails.
        try:
            return super(ResUsers, self)._check_credentials(password, env)
        except AccessDenied:
            passwd_allowed = env['interactive'] or not self.env.user._rpc_api_keys_only()
            if passwd_allowed and self.env.user.active:
                res = self.sudo().search([('id', '=', self.env.uid), ('oauth_access_token', '=', password)])
                if res:
                    return
            raise
    def _get_session_token_fields(self):
        # Include the token in the session fingerprint so existing sessions
        # are invalidated when it changes.
        return super(ResUsers, self)._get_session_token_fields() | {'oauth_access_token'}
| 42.013793 | 6,092 |
1,387 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from odoo import api, fields, models
_logger = logging.getLogger(__name__)
class ResConfigSettings(models.TransientModel):
    _inherit = 'res.config.settings'
    @api.model
    def get_uri(self):
        """Return the OAuth redirect URI of this instance."""
        return "%s/auth_oauth/signin" % (self.env['ir.config_parameter'].get_param('web.base.url'))
    auth_oauth_google_enabled = fields.Boolean(string='Allow users to sign in with Google')
    auth_oauth_google_client_id = fields.Char(string='Client ID')
    server_uri_google = fields.Char(string='Server uri')
    @api.model
    def get_values(self):
        """Expose the Google OAuth provider configuration in the settings form."""
        res = super(ResConfigSettings, self).get_values()
        google_provider = self.env.ref('auth_oauth.provider_google', False)
        # Explicit `if` instead of the `x and f(...)` expression-statement idiom.
        if google_provider:
            res.update(
                auth_oauth_google_enabled=google_provider.enabled,
                auth_oauth_google_client_id=google_provider.client_id,
                server_uri_google=self.get_uri(),
            )
        return res
    def set_values(self):
        """Persist the Google OAuth provider configuration from the settings form."""
        super(ResConfigSettings, self).set_values()
        google_provider = self.env.ref('auth_oauth.provider_google', False)
        if google_provider:
            google_provider.write({
                'enabled': self.auth_oauth_google_enabled,
                'client_id': self.auth_oauth_google_client_id,
            })
| 35.564103 | 1,387 |
7,575 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import functools
import json
import logging
import os
import werkzeug.urls
import werkzeug.utils
from werkzeug.exceptions import BadRequest
from odoo import api, http, SUPERUSER_ID, _
from odoo.exceptions import AccessDenied
from odoo.http import request
from odoo import registry as registry_get
from odoo.addons.auth_signup.controllers.main import AuthSignupHome as Home
from odoo.addons.web.controllers.main import db_monodb, ensure_db, set_cookie_and_redirect, login_and_redirect
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# helpers
#----------------------------------------------------------
def fragment_to_query_string(func):
    """Decorator for routes hit by OAuth2 implicit-flow redirects.

    Providers return the access token in the URL fragment (#...), which never
    reaches the server. When the route is called without parameters, answer
    with a tiny JS page that reloads the same URL with the fragment
    re-appended as a query string; the second request then reaches ``func``
    with the parameters filled in.
    """
    @functools.wraps(func)
    def wrapper(self, *a, **kw):
        kw.pop('debug', False)
        if not kw:
            return """<html><head><script>
                var l = window.location;
                var q = l.hash.substring(1);
                var r = l.pathname + l.search;
                if(q.length !== 0) {
                    var s = l.search ? (l.search === '?' ? '' : '&') : '?';
                    r = l.pathname + l.search + s + q;
                }
                if (r == l.pathname) {
                    r = '/';
                }
                window.location = r;
            </script></head><body></body></html>"""
        return func(self, *a, **kw)
    return wrapper
#----------------------------------------------------------
# Controller
#----------------------------------------------------------
class OAuthLogin(Home):
    def list_providers(self):
        """Return the enabled OAuth providers, each enriched with the
        ``auth_link`` URL that starts the implicit (token) flow."""
        try:
            providers = request.env['auth.oauth.provider'].sudo().search_read([('enabled', '=', True)])
        except Exception:
            providers = []
        for provider in providers:
            return_url = request.httprequest.url_root + 'auth_oauth/signin'
            state = self.get_state(provider)
            params = dict(
                response_type='token',
                client_id=provider['client_id'],
                redirect_uri=return_url,
                scope=provider['scope'],
                state=json.dumps(state),
                # nonce=base64.urlsafe_b64encode(os.urandom(16)),
            )
            provider['auth_link'] = "%s?%s" % (provider['auth_endpoint'], werkzeug.urls.url_encode(params))
        return providers
    def get_state(self, provider):
        """Build the ``state`` payload round-tripped through the provider:
        database name (d), provider id (p), redirect URL (r) and optional
        signup token (t)."""
        redirect = request.params.get('redirect') or 'web'
        if not redirect.startswith(('//', 'http://', 'https://')):
            # Relative redirects are made absolute against this instance.
            redirect = '%s%s' % (request.httprequest.url_root, redirect[1:] if redirect[0] == '/' else redirect)
        state = dict(
            d=request.session.db,
            p=provider['id'],
            r=werkzeug.urls.url_quote_plus(redirect),
        )
        token = request.params.get('token')
        if token:
            state['t'] = token
        return state
    @http.route()
    def web_login(self, *args, **kw):
        """Extend the login page with the OAuth provider buttons and surface
        errors passed back via the ``oauth_error`` query parameter."""
        ensure_db()
        if request.httprequest.method == 'GET' and request.session.uid and request.params.get('redirect'):
            # Redirect if already logged in and redirect param is present
            return request.redirect(request.params.get('redirect'))
        providers = self.list_providers()
        response = super(OAuthLogin, self).web_login(*args, **kw)
        if response.is_qweb:
            error = request.params.get('oauth_error')
            if error == '1':
                error = _("Sign up is not allowed on this database.")
            elif error == '2':
                error = _("Access Denied")
            elif error == '3':
                error = _("You do not have access to this database or your invitation has expired. Please ask for an invitation and be sure to follow the link in your invitation email.")
            else:
                error = None
            response.qcontext['providers'] = providers
            if error:
                response.qcontext['error'] = error
        return response
    def get_auth_signup_qcontext(self):
        # Expose the OAuth providers on the signup page as well.
        result = super(OAuthLogin, self).get_auth_signup_qcontext()
        result["providers"] = self.list_providers()
        return result
class OAuthController(http.Controller):
    @http.route('/auth_oauth/signin', type='http', auth='none')
    @fragment_to_query_string
    def signin(self, **kw):
        """OAuth2 redirect endpoint: validate the token carried in ``kw``,
        sign the user in and redirect to the URL encoded in ``state``.

        On failure, redirect to the login page with an ``oauth_error`` code
        (1: auth_signup missing, 2: generic signup error, 3: access denied).
        """
        state = json.loads(kw['state'])
        dbname = state['d']
        if not http.db_filter([dbname]):
            return BadRequest()
        provider = state['p']
        context = state.get('c', {})
        registry = registry_get(dbname)
        with registry.cursor() as cr:
            try:
                env = api.Environment(cr, SUPERUSER_ID, context)
                credentials = env['res.users'].sudo().auth_oauth(provider, kw)
                cr.commit()
                action = state.get('a')
                menu = state.get('m')
                redirect = werkzeug.urls.url_unquote_plus(state['r']) if state.get('r') else False
                url = '/web'
                if redirect:
                    url = redirect
                elif action:
                    url = '/web#action=%s' % action
                elif menu:
                    url = '/web#menu_id=%s' % menu
                resp = login_and_redirect(*credentials, redirect_url=url)
                # Since /web is hardcoded, verify user has right to land on it
                if werkzeug.urls.url_parse(resp.location).path == '/web' and not request.env.user.has_group('base.group_user'):
                    resp.location = '/'
                return resp
            except AttributeError:
                # auth_signup is not installed
                _logger.error("auth_signup not installed on database %s: oauth sign up cancelled." % (dbname,))
                url = "/web/login?oauth_error=1"
            except AccessDenied:
                # oauth credentials not valid, user could be on a temporary session
                _logger.info('OAuth2: access denied, redirect to main page in case a valid session exists, without setting cookies')
                url = "/web/login?oauth_error=3"
                redirect = request.redirect(url, 303)
                redirect.autocorrect_location_header = False
                return redirect
            except Exception as e:
                # signup error
                _logger.exception("OAuth2: %s" % str(e))
                url = "/web/login?oauth_error=2"
        return set_cookie_and_redirect(url)
    @http.route('/auth_oauth/oea', type='http', auth='none')
    def oea(self, **kw):
        """login user via Odoo Account provider"""
        dbname = kw.pop('db', None)
        if not dbname:
            dbname = db_monodb()
        if not dbname:
            return BadRequest()
        if not http.db_filter([dbname]):
            return BadRequest()
        registry = registry_get(dbname)
        with registry.cursor() as cr:
            try:
                env = api.Environment(cr, SUPERUSER_ID, {})
                provider = env.ref('auth_oauth.provider_openerp')
            except ValueError:
                return set_cookie_and_redirect('/web?db=%s' % dbname)
            assert provider._name == 'auth.oauth.provider'
        # Delegate to the regular signin flow, forbidding user creation.
        state = {
            'd': dbname,
            'p': provider.id,
            'c': {'no_user_creation': True},
        }
        kw['state'] = json.dumps(state)
        return self.signin(**kw)
| 38.647959 | 7,575 |
869 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Odoo module manifest: public website profile pages for users.
{
    'name': 'Website profile',
    'category': 'Hidden',
    'version': '1.0',
    'summary': 'Access the website profile of the users',
    'description': "Allows to access the website profile of the users and see their statistics (karma, badges, etc..)",
    'depends': [
        'website_partner',
        'gamification'
    ],
    'data': [
        'data/mail_template_data.xml',
        'views/gamification_badge_views.xml',
        'views/website_profile.xml',
        'security/ir.model.access.csv',
    ],
    'auto_install': False,
    'assets': {
        'web.assets_frontend': [
            'website_profile/static/src/scss/website_profile.scss',
            'website_profile/static/src/js/website_profile.js',
        ],
    },
    'license': 'LGPL-3',
}
| 31.035714 | 869 |
2,504 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import hashlib
import uuid
from datetime import datetime
from werkzeug import urls
from odoo import api, models
VALIDATION_KARMA_GAIN = 3
class Users(models.Model):
    _inherit = 'res.users'
    @property
    def SELF_READABLE_FIELDS(self):
        # Users may read their own karma from the frontend.
        return super().SELF_READABLE_FIELDS + ['karma']
    @property
    def SELF_WRITEABLE_FIELDS(self):
        # Profile fields a user may edit on his own account from the website.
        return super().SELF_WRITEABLE_FIELDS + [
            'country_id', 'city', 'website', 'website_description', 'website_published',
        ]
    @api.model
    def _generate_profile_token(self, user_id, email):
        """Return a token for email validation. This token is valid for the day
        and is a hash based on a (secret) uuid generated by the forum module,
        the user_id, the email and currently the day (to be updated if necessary). """
        profile_uuid = self.env['ir.config_parameter'].sudo().get_param('website_profile.uuid')
        if not profile_uuid:
            # Lazily create and persist the secret uuid on first use.
            profile_uuid = str(uuid.uuid4())
            self.env['ir.config_parameter'].sudo().set_param('website_profile.uuid', profile_uuid)
        return hashlib.sha256((u'%s-%s-%s-%s' % (
            datetime.now().replace(hour=0, minute=0, second=0, microsecond=0),
            profile_uuid,
            user_id,
            email
        )).encode('utf-8')).hexdigest()
    def _send_profile_validation_email(self, **kwargs):
        """Email the user a link allowing him to validate his email address.

        :return: True when a mail was (attempted to be) sent, False when the
            user has no email address.
        """
        if not self.email:
            return False
        token = self._generate_profile_token(self.id, self.email)
        activation_template = self.env.ref('website_profile.validation_email')
        if activation_template:
            params = {
                'token': token,
                'user_id': self.id,
                'email': self.email
            }
            params.update(kwargs)
            token_url = self.get_base_url() + '/profile/validate_email?%s' % urls.url_encode(params)
            with self._cr.savepoint():
                activation_template.sudo().with_context(token_url=token_url).send_mail(
                    self.id, force_send=True, raise_exception=True)
        return True
    def _process_profile_validation_token(self, token, email):
        """Check ``token`` against the expected one and, on first validation
        (karma still 0), grant the validation karma.

        :return: result of write() on success, False otherwise.
        """
        self.ensure_one()
        validation_token = self._generate_profile_token(self.id, email)
        if token == validation_token and self.karma == 0:
            return self.write({'karma': VALIDATION_KARMA_GAIN})
        return False
| 37.939394 | 2,504 |
263 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
# Make gamification badges publishable on the website (adds the
# website.published.mixin fields/behavior to the existing model).
class GamificationBadge(models.Model):
    _name = 'gamification.badge'
    _inherit = ['gamification.badge', 'website.published.mixin']
293 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class Website(models.Model):
    _inherit = 'website'
    # Minimum karma a visitor needs before he can browse other users' profiles
    # (enforced by the website_profile controller).
    karma_profile_min = fields.Integer(string="Minimal karma to see other user's profile", default=150)
| 29.3 | 293 |
14,680 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import werkzeug
import werkzeug.exceptions
import werkzeug.urls
import werkzeug.wrappers
import math
from dateutil.relativedelta import relativedelta
from operator import itemgetter
from odoo import fields, http, modules, tools
from odoo.http import request
from odoo.osv import expression
class WebsiteProfile(http.Controller):
_users_per_page = 30
_pager_max_pages = 5
# Profile
# ---------------------------------------------------
def _check_avatar_access(self, user_id, **post):
""" Base condition to see user avatar independently form access rights
is to see published users having karma, meaning they participated to
frontend applications like forum or elearning. """
try:
user = request.env['res.users'].sudo().browse(user_id).exists()
except:
return False
if user:
return user.website_published and user.karma > 0
return False
def _get_default_avatar(self):
img_path = modules.get_module_resource('web', 'static/img', 'placeholder.png')
with open(img_path, 'rb') as f:
return base64.b64encode(f.read())
def _check_user_profile_access(self, user_id):
user_sudo = request.env['res.users'].sudo().browse(user_id)
# User can access - no matter what - his own profile
if user_sudo.id == request.env.user.id:
return user_sudo
if user_sudo.karma == 0 or not user_sudo.website_published or \
(user_sudo.id != request.session.uid and request.env.user.karma < request.website.karma_profile_min):
return False
return user_sudo
def _prepare_user_values(self, **kwargs):
kwargs.pop('edit_translations', None) # avoid nuking edit_translations
values = {
'user': request.env.user,
'is_public_user': request.website.is_public_user(),
'validation_email_sent': request.session.get('validation_email_sent', False),
'validation_email_done': request.session.get('validation_email_done', False),
}
values.update(kwargs)
return values
    def _prepare_user_profile_parameters(self, **post):
        # Pass-through hook: extending controllers may tweak the parameters
        # used to build the profile page values.
        return post
    def _prepare_user_profile_values(self, user, **post):
        """Rendering values specific to one user's public profile page."""
        return {
            'uid': request.env.user.id,
            'user': user,
            'main_object': user,
            'is_profile_page': True,
            'edit_button_url_param': '',
        }
    @http.route([
        '/profile/avatar/<int:user_id>',
    ], type='http', auth="public", website=True, sitemap=False)
    def get_user_profile_avatar(self, user_id, field='avatar_256', width=0, height=0, crop=False, **post):
        """Serve a user's avatar image.

        For published users with karma (see _check_avatar_access) the image is
        fetched with sudo, so it is visible regardless of access rights;
        otherwise normal access rules apply. Falls back to the generic
        placeholder when no image is available.
        """
        # Only whitelisted image fields may be requested through this route.
        if field not in ('image_128', 'image_256', 'avatar_128', 'avatar_256'):
            return werkzeug.exceptions.Forbidden()
        can_sudo = self._check_avatar_access(user_id, **post)
        if can_sudo:
            status, headers, image_base64 = request.env['ir.http'].sudo().binary_content(
                model='res.users', id=user_id, field=field,
                default_mimetype='image/png')
        else:
            status, headers, image_base64 = request.env['ir.http'].binary_content(
                model='res.users', id=user_id, field=field,
                default_mimetype='image/png')
        if status == 301:
            return request.env['ir.http']._response_by_status(status, headers, image_base64)
        if status == 304:
            return werkzeug.wrappers.Response(status=304)
        if not image_base64:
            image_base64 = self._get_default_avatar()
        if not (width or height):
            width, height = tools.image_guess_size_from_field_name(field)
        image_base64 = tools.image_process(image_base64, size=(int(width), int(height)), crop=crop)
        content = base64.b64decode(image_base64)
        headers = http.set_safe_image_headers(headers, content)
        response = request.make_response(content, headers)
        response.status_code = status
        return response
    @http.route(['/profile/user/<int:user_id>'], type='http', auth="public", website=True)
    def view_user_profile(self, user_id, **post):
        """Render a user's public profile, or the 'private profile' page when
        the visitor may not see it (see _check_user_profile_access)."""
        user = self._check_user_profile_access(user_id)
        if not user:
            return request.render("website_profile.private_profile")
        values = self._prepare_user_values(**post)
        params = self._prepare_user_profile_parameters(**post)
        values.update(self._prepare_user_profile_values(user, **params))
        return request.render("website_profile.user_profile_main", values)
# Edit Profile
# ---------------------------------------------------
    @http.route('/profile/edit', type='http', auth="user", website=True)
    def view_user_profile_edition(self, **kwargs):
        """Render the profile edition form.

        Admins may edit another user's profile by passing ``user_id``;
        everyone else edits their own profile.
        """
        user_id = int(kwargs.get('user_id', 0))
        countries = request.env['res.country'].search([])
        if user_id and request.env.user.id != user_id and request.env.user._is_admin():
            user = request.env['res.users'].browse(user_id)
            values = self._prepare_user_values(searches=kwargs, user=user, is_public_user=False)
        else:
            values = self._prepare_user_values(searches=kwargs)
        values.update({
            'email_required': kwargs.get('email_required'),
            'countries': countries,
            'url_param': kwargs.get('url_param'),
        })
        return request.render("website_profile.user_profile_edit_main", values)
    def _profile_edition_preprocess_values(self, user, **kwargs):
        """Map the posted profile-edit form fields to res.users write() values."""
        values = {
            'name': kwargs.get('name'),
            'website': kwargs.get('website'),
            'email': kwargs.get('email'),
            'city': kwargs.get('city'),
            'country_id': int(kwargs.get('country')) if kwargs.get('country') else False,
            'website_description': kwargs.get('description'),
        }
        # Either clear the avatar or replace it with the uploaded file.
        if 'clear_image' in kwargs:
            values['image_1920'] = False
        elif kwargs.get('ufile'):
            image = kwargs.get('ufile').read()
            values['image_1920'] = base64.b64encode(image)
        if request.uid == user.id: # the controller allows to edit only its own privacy settings; use partner management for other cases
            values['website_published'] = kwargs.get('website_published') == 'True'
        return values
@http.route('/profile/user/save', type='http', auth="user", methods=['POST'], website=True)
def save_edited_profile(self, **kwargs):
    """Persist the profile edition form and redirect to the profile page.

    Only fields whitelisted in ``res.users.SELF_WRITEABLE_FIELDS`` are
    written, so a forged POST cannot set arbitrary user fields.
    """
    try:
        # ``user_id`` is untrusted POST data: a malformed value must not
        # crash with a 500; fall back to saving the current user's profile.
        user_id = int(kwargs.get('user_id', 0))
    except (ValueError, TypeError):
        user_id = 0
    if user_id and request.env.user.id != user_id and request.env.user._is_admin():
        user = request.env['res.users'].browse(user_id)
    else:
        user = request.env.user
    values = self._profile_edition_preprocess_values(user, **kwargs)
    whitelisted_values = {key: values[key] for key in user.SELF_WRITEABLE_FIELDS if key in values}
    user.write(whitelisted_values)
    if kwargs.get('url_param'):
        return request.redirect("/profile/user/%d?%s" % (user.id, kwargs['url_param']))
    else:
        return request.redirect("/profile/user/%d" % user.id)
# Ranks and Badges
# ---------------------------------------------------
def _prepare_badges_domain(self, **kwargs):
    """Hook for other modules to restrict the badges shown on the profile
    page, depending on the context."""
    domain = [('website_published', '=', True)]
    if 'badge_category' in kwargs:
        category_leaf = [('challenge_ids.challenge_category', '=', kwargs.get('badge_category'))]
        domain = expression.AND([category_leaf, domain])
    return domain
def _prepare_ranks_badges_values(self, **kwargs):
    """Gather the rendering values for the ranks & badges page."""
    ranks = []
    if 'badge_category' not in kwargs:
        # Ranks are only displayed when no badge category filter is active.
        ranks = request.env['gamification.karma.rank'].sudo().search([], order='karma_min DESC')
    Badge = request.env['gamification.badge']
    badges = Badge.sudo().search(self._prepare_badges_domain(**kwargs))
    badges = badges.sorted("granted_users_count", reverse=True)
    values = self._prepare_user_values(searches={'badges': True})
    values.update({
        'ranks': ranks,
        'badges': badges,
        'user': request.env.user,
    })
    return values
@http.route('/profile/ranks_badges', type='http', auth="public", website=True, sitemap=True)
def view_ranks_badges(self, **kwargs):
    """Render the page listing all karma ranks and published badges."""
    return request.render(
        "website_profile.rank_badge_main",
        self._prepare_ranks_badges_values(**kwargs),
    )
# All Users Page
# ---------------------------------------------------
def _prepare_all_users_values(self, users):
    """Serialize ``users`` records into plain dicts for the leaderboard
    template (one dict per user, in the same order)."""
    return [{
        'id': user.id,
        'name': user.name,
        'company_name': user.company_id.name,
        'rank': user.rank_id.name,
        'karma': user.karma,
        'badge_count': len(user.badge_ids),
        'website_published': user.website_published,
    } for user in users]
@http.route(['/profile/users',
             '/profile/users/page/<int:page>'], type='http', auth="public", website=True, sitemap=True)
def view_all_users_page(self, page=1, **kwargs):
    """Render the karma leaderboard of all published users.

    :param page: 1-based pager page number
    :param kwargs: may contain ``search`` (name / commercial company name
        filter) and ``group_by`` (``'week'`` / ``'month'`` — rank over the
        karma gained in that period instead of all-time karma)
    """
    User = request.env['res.users']
    # Only rank users that opted in (website_published) and actually
    # participated (karma > 1).
    dom = [('karma', '>', 1), ('website_published', '=', True)]
    # Searches
    search_term = kwargs.get('search')
    group_by = kwargs.get('group_by', False)
    render_values = {
        'search': search_term,
        'group_by': group_by or 'all',
    }
    if search_term:
        dom = expression.AND([['|', ('name', 'ilike', search_term), ('partner_id.commercial_company_name', 'ilike', search_term)], dom])
    user_count = User.sudo().search_count(dom)
    my_user = request.env.user
    current_user_values = False
    if user_count:
        page_count = math.ceil(user_count / self._users_per_page)
        pager = request.website.pager(url="/profile/users", total=user_count, page=page, step=self._users_per_page,
                                      scope=page_count if page_count < self._pager_max_pages else self._pager_max_pages,
                                      url_args=kwargs)
        users = User.sudo().search(dom, limit=self._users_per_page, offset=pager['offset'], order='karma DESC')
        user_values = self._prepare_all_users_values(users)
        # Get karma position for users (only website_published)
        position_domain = [('karma', '>', 1), ('website_published', '=', True)]
        position_map = self._get_position_map(position_domain, users, group_by)
        # Users missing from the position map are ranked last (max + 1).
        max_position = max([user_data['karma_position'] for user_data in position_map.values()], default=1)
        for user in user_values:
            user_data = position_map.get(user['id'], dict())
            user['position'] = user_data.get('karma_position', max_position + 1)
            user['karma_gain'] = user_data.get('karma_gain_total', 0)
        user_values.sort(key=itemgetter('position'))
        # Show the current user's own row even when he is not on this page,
        # as long as he would appear somewhere in the ranking (dom kept).
        if my_user.website_published and my_user.karma and my_user.id not in users.ids:
            # Need to keep the dom to search only for users that appear in the ranking page
            current_user = User.sudo().search(expression.AND([[('id', '=', my_user.id)], dom]))
            if current_user:
                current_user_values = self._prepare_all_users_values(current_user)[0]
                user_data = self._get_position_map(position_domain, current_user, group_by).get(current_user.id, {})
                current_user_values['position'] = user_data.get('karma_position', 0)
                current_user_values['karma_gain'] = user_data.get('karma_gain_total', 0)
    else:
        user_values = []
        pager = {'page_count': 0}
    render_values.update({
        # The podium is only shown on page 1 of an unfiltered ranking.
        'top3_users': user_values[:3] if not search_term and page == 1 else [],
        'users': user_values,
        'my_user': current_user_values,
        'pager': pager,
    })
    return request.render("website_profile.users_page_main", render_values)
def _get_position_map(self, position_domain, users, group_by):
    """Return ``{user_id: position data}``: period-based ranking when
    ``group_by`` is set, otherwise all-time karma position."""
    if not group_by:
        position_results = users._get_karma_position(position_domain)
        return {data['user_id']: dict(data) for data in position_results}
    return self._get_user_tracking_karma_gain_position(position_domain, users.ids, group_by)
def _get_user_tracking_karma_gain_position(self, domain, user_ids, group_by):
    """ Helper method computing boundaries to give to _get_tracking_karma_gain_position.
    See that method for more details. """
    to_date = fields.Date.today()
    period_deltas = {'week': relativedelta(weeks=1), 'month': relativedelta(months=1)}
    delta = period_deltas.get(group_by)
    from_date = (to_date - delta) if delta else None
    users = request.env['res.users'].browse(user_ids)
    results = users._get_tracking_karma_gain_position(domain, from_date=from_date, to_date=to_date)
    return {item['user_id']: dict(item) for item in results}
# User and validation
# --------------------------------------------------
@http.route('/profile/send_validation_email', type='json', auth='user', website=True)
def send_validation_email(self, **kwargs):
    """Send a profile validation email to the logged-in user and flag the
    session so the UI can show a confirmation."""
    # The website public user never gets a validation email.
    if request.env.uid != request.website.user_id.id:
        request.env.user._send_profile_validation_email(**kwargs)
    request.session['validation_email_sent'] = True
    return True
@http.route('/profile/validate_email', type='http', auth='public', website=True, sitemap=False)
def validate_email(self, token, user_id, email, **kwargs):
    """Validate an email address from the token-signed link, then redirect
    (to ``redirect_url`` if given, otherwise the home page)."""
    user = request.env['res.users'].sudo().browse(int(user_id))
    if user._process_profile_validation_token(token, email):
        request.session['validation_email_done'] = True
    return request.redirect(kwargs.get('redirect_url', '/'))
@http.route('/profile/validate_email/close', type='json', auth='public', website=True)
def validate_email_done(self, **kwargs):
    """Dismiss the "email validated" notification stored in the session."""
    request.session['validation_email_done'] = False
    return True
| 45.169231 | 14,680 |
1,153 |
py
|
PYTHON
|
15.0
|
# -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Manifest of the `test_website` technical module (test fixtures only).
{
    'name': 'Website Test',
    'version': '1.0',
    # Hidden: this module must not appear in the Apps menu.
    'category': 'Hidden',
    # Very high sequence keeps it at the bottom of module listings.
    'sequence': 9876,
    'summary': 'Website Test, mainly for module install/uninstall tests',
    'description': """This module contains tests related to website. Those are
present in a separate module as we are testing module install/uninstall/upgrade
and we don't want to reload the website module every time, including it's possible
dependencies. Neither we want to add in website module some routes, views and
models which only purpose is to run tests.""",
    'depends': [
        'web_unsplash',
        'website',
        'theme_default',
    ],
    'data': [
        'views/templates.xml',
        'data/test_website_data.xml',
        'security/ir.model.access.csv',
    ],
    'installable': True,
    'application': False,
    'assets': {
        # Frontend bundle: loaded on every public website page.
        'web.assets_frontend': [
            'test_website/static/src/js/test_error.js',
        ],
        # Test bundle: JS tours loaded only when running tests.
        'web.assets_tests': [
            'test_website/static/tests/**/*',
        ],
    },
    'license': 'LGPL-3',
}
| 32.027778 | 1,153 |
3,392 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo.tests
import lxml
@odoo.tests.common.tagged('post_install', '-at_install')
class TestIsMultiLang(odoo.tests.HttpCase):
    """Integration tests for URL language prefixing on multilang websites."""

    def test_01_is_multilang_url(self):
        """Multilang routes must be prefixed with the non-default language
        code; routes flagged as non-multilang must keep a bare URL."""
        website = self.env['website'].search([], limit=1)
        fr = self.env.ref('base.lang_fr').sudo()
        en = self.env.ref('base.lang_en').sudo()
        fr.active = True
        fr_prefix = "/" + fr.iso_code
        website.default_lang_id = en
        website.language_ids = en + fr
        for data in [None, {'post': True}]:  # GET / POST
            body = lxml.html.fromstring(self.url_open('/fr/multi_url', data=data).content)
            self.assertEqual(fr_prefix + '/get', body.find('./a[@id="get"]').get('href'))
            self.assertEqual(fr_prefix + '/post', body.find('./form[@id="post"]').get('action'))
            self.assertEqual(fr_prefix + '/get_post', body.find('./a[@id="get_post"]').get('href'))
            # nomultilang route: never receives the language prefix
            self.assertEqual('/get_post_nomultilang', body.find('./a[@id="get_post_nomultilang"]').get('href'))

    def test_02_url_lang_code_underscore(self):
        """Languages whose code contains an underscore (fr_BE) must still
        produce valid slugs, URL prefixes and hreflang alternate links."""
        website = self.env['website'].browse(1)
        it = self.env.ref('base.lang_it').sudo()
        en = self.env.ref('base.lang_en').sudo()
        be = self.env.ref('base.lang_fr_BE').sudo()
        country1 = self.env['res.country'].create({'name': "My Super Country"})
        it.active = True
        be.active = True
        website.domain = 'http://127.0.0.1:8069'  # for _is_canonical_url
        website.default_lang_id = en
        website.language_ids = en + it + be
        # Translate the country name in Italian and Belgian French so the
        # slug differs per language.
        params = {
            'src': country1.name,
            'value': country1.name + ' Italia',
            'type': 'model',
            'name': 'res.country,name',
            'res_id': country1.id,
            'lang': it.code,
            'state': 'translated',
        }
        self.env['ir.translation'].create(params)
        params.update({
            'value': country1.name + ' Belgium',
            'lang': be.code,
        })
        self.env['ir.translation'].create(params)
        # Default language: slug built from the untranslated name.
        r = self.url_open('/test_lang_url/%s' % country1.id)
        self.assertEqual(r.status_code, 200)
        self.assertTrue(r.url.endswith('/test_lang_url/my-super-country-%s' % country1.id))
        # Italian: slug built from the translated name, URL prefixed by url_code.
        r = self.url_open('/%s/test_lang_url/%s' % (it.url_code, country1.id))
        self.assertEqual(r.status_code, 200)
        self.assertTrue(r.url.endswith('/%s/test_lang_url/my-super-country-italia-%s' % (it.url_code, country1.id)))
        body = lxml.html.fromstring(r.content)
        # Note: this test is indirectly testing the `ref=canonical` tag is correctly set,
        # as it is required in order for `rel=alternate` tags to be inserted in the DOM
        it_href = body.find('./head/link[@rel="alternate"][@hreflang="it"]').get('href')
        fr_href = body.find('./head/link[@rel="alternate"][@hreflang="fr"]').get('href')
        en_href = body.find('./head/link[@rel="alternate"][@hreflang="en"]').get('href')
        self.assertTrue(it_href.endswith('/%s/test_lang_url/my-super-country-italia-%s' % (it.url_code, country1.id)))
        self.assertTrue(fr_href.endswith('/%s/test_lang_url/my-super-country-belgium-%s' % (be.url_code, country1.id)))
        self.assertTrue(en_href.endswith('/test_lang_url/my-super-country-%s' % country1.id))
| 47.774648 | 3,392 |
9,281 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.website.tools import MockRequest
from odoo.tests import standalone
@standalone('cow_views', 'website_standalone')
def test_01_cow_views_unlink_on_module_update(env):
    """ Ensure COW views are correctly removed during module update.
    Not removing the view could lead to traceback:
    - Having a view A
    - Having a view B that inherits from a view C
    - View B t-call view A
    - COW view B
    - Delete view A and B from module datas and update it
    - Rendering view C will crash since it will render child view B that
      t-call unexisting view A
    """
    View = env['ir.ui.view']
    Imd = env['ir.model.data']
    update_module_base_view = env.ref('test_website.update_module_base_view')
    update_module_view_to_be_t_called = View.create({
        'name': 'View to be t-called',
        'type': 'qweb',
        'arch': '<div>I will be t-called</div>',
        'key': 'test_website.update_module_view_to_be_t_called',
    })
    update_module_child_view = View.create({
        'name': 'Child View',
        'mode': 'extension',
        'inherit_id': update_module_base_view.id,
        'arch': '''
            <div position="inside">
                <t t-call="test_website.update_module_view_to_be_t_called"/>
            </div>
        ''',
        'key': 'test_website.update_module_child_view',
    })

    # Create IMD so when updating the module the views will be removed (not found in file)
    Imd.create({
        'module': 'test_website',
        'name': 'update_module_view_to_be_t_called',
        'model': 'ir.ui.view',
        'res_id': update_module_view_to_be_t_called.id,
    })
    Imd.create({
        'module': 'test_website',
        'name': 'update_module_child_view',
        'model': 'ir.ui.view',
        'res_id': update_module_child_view.id,
    })

    # Trigger COW on child view: writing with a website_id in context creates
    # a website-specific copy of the view.
    update_module_child_view.with_context(website_id=1).write({'name': 'Child View (W1)'})

    # Ensure views are correctly setup (generic + COW copy = 2 records)
    msg = "View '%s' does not exist!"
    assert View.search_count([
        ('type', '=', 'qweb'),
        ('key', '=', update_module_child_view.key)
    ]) == 2, msg % update_module_child_view.key
    assert bool(env.ref(update_module_view_to_be_t_called.key)),\
        msg % update_module_view_to_be_t_called.key
    assert bool(env.ref(update_module_base_view.key)), msg % update_module_base_view.key

    # Upgrade the module; the upgrade reloads the registry, hence the reset below.
    test_website_module = env['ir.module.module'].search([('name', '=', 'test_website')])
    test_website_module.button_immediate_upgrade()
    env.reset()  # clear the set of environments
    env = env()  # get an environment that refers to the new registry

    # Ensure generic views got removed
    view = env.ref('test_website.update_module_view_to_be_t_called', raise_if_not_found=False)
    assert not view, "Generic view did not get removed!"

    # Ensure specific COW views got removed
    assert not env['ir.ui.view'].search_count([
        ('type', '=', 'qweb'),
        ('key', '=', 'test_website.update_module_child_view'),
    ]), "Specific COW views did not get removed!"
@standalone('theme_views', 'website_standalone')
def test_02_copy_ids_views_unlink_on_module_update(env):
    """ Ensure copy_ids views are correctly removed during module update.
    - Having an ir.ui.view A in the codebase, eg `website.layout`
    - Having a theme.ir.ui.view B in a theme, inheriting ir.ui.view A
    - Removing the theme.ir.ui.view B from the XML file and then updating the
      theme for a particular website should:
      1. Remove the theme.ir.ui.view record, which is the record pointed by the
         ir.model.data
         -> This is done through the regular Odoo behavior related to the
            ir.model.data and XML file check on upgrade.
      2. Remove the theme.ir.ui.view's copy_ids (sort of the COW views)
         -> Not working for now
      3. (not impact other website using this theme, see below)
         -> This is done through odoo/odoo@96ef4885a79 but did not come with
            tests
    Point 2. was not working, this test aims to ensure it will now.
    Note: This can't be done through a `ondelete=cascade` as this would
          impact other websites when modifying a specific website. This would
          be against the multi-website rule:
          "What is done on a website should not alter other websites."
          Regarding the flow described above, if a theme module was updated
          through the command line (or via the UI, but this is not possible in
          standard as theme modules are hidden from the Apps), it should
          update every website using this theme.
    """
    View = env['ir.ui.view']
    ThemeView = env['theme.ir.ui.view']
    Imd = env['ir.model.data']
    website_1 = env['website'].browse(1)
    website_2 = env['website'].browse(2)
    theme_default = env.ref('base.module_theme_default')

    # Install theme_default on website 1 and website 2
    (website_1 + website_2).theme_id = theme_default
    env['ir.module.module'].with_context(load_all_views=True)._theme_load(website_1)
    env['ir.module.module'].with_context(load_all_views=True)._theme_load(website_2)

    key = 'theme_default.theme_child_view'
    domain = [
        ('type', '=', 'qweb'),
        ('key', '=', key),
    ]

    def _simulate_xml_view():
        # Simulate a theme.ir.ui.view inside theme_default XML files
        base_view = env.ref('test_website.update_module_base_view')
        theme_child_view = ThemeView.create({
            'name': 'Theme Child View',
            'mode': 'extension',
            'inherit_id': f'{base_view._name},{base_view.id}',
            'arch': '''
                <div position="inside">
                    <p>, and I am inherited by a theme.ir.ui.view</p>
                </div>
            ''',
            'key': key,
        })
        # Create IMD so when updating the module the views will be removed (not found in file)
        Imd.create({
            'module': 'theme_default',
            'name': 'theme_child_view',
            'model': 'theme.ir.ui.view',
            'res_id': theme_child_view.id,
        })
        # Simulate the theme.ir.ui.view being installed on website 1 and 2
        View.create([
            theme_child_view._convert_to_base_model(website_1),
            theme_child_view._convert_to_base_model(website_2),
        ])
        # Ensure views are correctly setup: the theme.ir.ui.view should have been
        # copied to an ir.ui.view for website 1
        view_website_1, view_website_2 = View.search(domain + [
            ('theme_template_id', '=', theme_child_view.id),
            ('website_id', 'in', (website_1 + website_2).ids),
        ])
        assert (
            set((view_website_1 + view_website_2)).issubset(theme_child_view.copy_ids)
            and view_website_1.website_id == website_1
            and view_website_2.website_id == website_2
        ), "Theme View should have been copied to the website."
        return view_website_1, view_website_2, theme_child_view

    ##########################################
    # CASE 1: generic update (-u, migration) #
    ##########################################
    view_website_1, view_website_2, theme_child_view = _simulate_xml_view()

    # Upgrade the module (no website in the request -> generic update,
    # should clean the theme view and ALL its copies)
    theme_default.button_immediate_upgrade()
    env.reset()  # clear the set of environments
    env = env()  # get an environment that refers to the new registry

    # Ensure the theme.ir.ui.view got removed (since there is an IMD but not
    # present in XML files)
    view = env.ref('theme_default.theme_child_view', False)
    assert not view, "Theme view should have been removed during module update."
    assert not theme_child_view.exists(),\
        "Theme view should have been removed during module update. (2)"

    # Ensure copy_ids view got removed (and is not a leftover orphan)
    assert not View.search(domain), "copy_ids views did not get removed!"
    assert not (view_website_1.exists() or view_website_2.exists()),\
        "copy_ids views did not get removed! (2)"

    #####################################################
    # CASE 2: specific update (website theme selection) #
    #####################################################
    view_website_1, view_website_2, theme_child_view = _simulate_xml_view()

    # Upgrade the module with website_1 in the request -> only website_1's
    # copies may be cleaned.
    with MockRequest(env, website=website_1):
        theme_default.button_immediate_upgrade()
    env.reset()  # clear the set of environments
    env = env()  # get an environment that refers to the new registry

    # Ensure the theme.ir.ui.view got removed (since there is an IMD but not
    # present in XML files)
    view = env.ref('theme_default.theme_child_view', False)
    assert not view, "Theme view should have been removed during module update."
    assert not theme_child_view.exists(),\
        "Theme view should have been removed during module update. (2)"

    # Ensure only website_1 copy_ids got removed, website_2 should be untouched
    assert not view_website_1.exists() and view_website_2.exists(),\
        "Only website_1 copy should be removed (2)"
| 42.378995 | 9,281 |
10,597 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo
from odoo.tests import HttpCase, tagged
from odoo.tests.common import HOST
from odoo.tools import mute_logger
from odoo.addons.http_routing.models.ir_http import slug
from unittest.mock import patch
@tagged('-at_install', 'post_install')
class TestRedirect(HttpCase):
    """Integration tests for `website.rewrite` URL redirections (301/308)."""

    def setUp(self):
        super(TestRedirect, self).setUp()
        # Portal user used to check redirects also work while authenticated.
        self.user_portal = self.env['res.users'].with_context({'no_reset_password': True}).create({
            'name': 'Test Website Portal User',
            'login': 'portal_user',
            'password': 'portal_user',
            'email': '[email protected]',
            'groups_id': [(6, 0, [self.env.ref('base.group_portal').id])]
        })
        self.base_url = "http://%s:%s" % (HOST, odoo.tools.config['http_port'])

    def test_01_redirect_308_model_converter(self):
        self.env['website.rewrite'].create({
            'name': 'Test Website Redirect',
            'redirect_type': '308',
            'url_from': '/test_website/country/<model("res.country"):country>',
            'url_to': '/redirected/country/<model("res.country"):country>',
        })
        country_ad = self.env.ref('base.ad')

        """ Ensure 308 redirect with model converter works fine, including:
            - Correct & working redirect as public user
            - Correct & working redirect as logged in user
            - Correct replace of url_for() URLs in DOM
        """
        url = '/test_website/country/' + slug(country_ad)
        redirect_url = url.replace('test_website', 'redirected')

        # [Public User] Open the original url and check redirect OK
        r = self.url_open(url)
        self.assertEqual(r.status_code, 200)
        self.assertTrue(r.url.endswith(redirect_url), "Ensure URL got redirected")
        self.assertTrue(country_ad.name in r.text, "Ensure the controller returned the expected value")
        self.assertTrue(redirect_url in r.text, "Ensure the url_for has replaced the href URL in the DOM")

        # [Logged In User] Open the original url and check redirect OK
        self.authenticate("portal_user", "portal_user")
        r = self.url_open(url)
        self.assertEqual(r.status_code, 200)
        self.assertTrue(r.url.endswith(redirect_url), "Ensure URL got redirected (2)")
        self.assertTrue('Logged In' in r.text, "Ensure logged in")
        self.assertTrue(country_ad.name in r.text, "Ensure the controller returned the expected value (2)")
        self.assertTrue(redirect_url in r.text, "Ensure the url_for has replaced the href URL in the DOM")

    @mute_logger('odoo.addons.http_routing.models.ir_http')  # mute 403 warning
    def test_02_redirect_308_RequestUID(self):
        """308 redirect targeting a model-converter route must preserve the
        target route's access checks (404/403) and slug canonicalization."""
        self.env['website.rewrite'].create({
            'name': 'Test Website Redirect',
            'redirect_type': '308',
            'url_from': '/test_website/200/<model("test.model"):rec>',
            'url_to': '/test_website/308/<model("test.model"):rec>',
        })

        rec_published = self.env['test.model'].create({'name': 'name', 'website_published': True})
        rec_unpublished = self.env['test.model'].create({'name': 'name', 'website_published': False})

        WebsiteHttp = odoo.addons.website.models.ir_http.Http

        def _get_error_html(env, code, value):
            return str(code).split('_')[-1], "CUSTOM %s" % code

        with patch.object(WebsiteHttp, '_get_error_html', _get_error_html):
            # The patch avoids rendering the real 404 page (which would
            # regenerate assets each time and unlink the old ones). It also
            # makes sure the exception is handled by handle_exception and
            # that a "managed error" page is returned.

            # published record
            resp = self.url_open("/test_website/200/name-%s" % rec_published.id, allow_redirects=False)
            self.assertEqual(resp.status_code, 308)
            self.assertEqual(resp.headers.get('Location'), self.base_url + "/test_website/308/name-%s" % rec_published.id)
            resp = self.url_open("/test_website/308/name-%s" % rec_published.id, allow_redirects=False)
            self.assertEqual(resp.status_code, 200)
            # wrong slug: redirected (308), then canonicalized (301)
            resp = self.url_open("/test_website/200/xx-%s" % rec_published.id, allow_redirects=False)
            self.assertEqual(resp.status_code, 308)
            self.assertEqual(resp.headers.get('Location'), self.base_url + "/test_website/308/xx-%s" % rec_published.id)
            resp = self.url_open("/test_website/308/xx-%s" % rec_published.id, allow_redirects=False)
            self.assertEqual(resp.status_code, 301)
            self.assertEqual(resp.headers.get('Location'), self.base_url + "/test_website/308/name-%s" % rec_published.id)
            resp = self.url_open("/test_website/200/xx-%s" % rec_published.id, allow_redirects=True)
            self.assertEqual(resp.status_code, 200)
            self.assertEqual(resp.url, self.base_url + "/test_website/308/name-%s" % rec_published.id)

            # unexisting record: redirect still happens, then 404
            resp = self.url_open("/test_website/200/name-100", allow_redirects=False)
            self.assertEqual(resp.status_code, 308)
            self.assertEqual(resp.headers.get('Location'), self.base_url + "/test_website/308/name-100")
            resp = self.url_open("/test_website/308/name-100", allow_redirects=False)
            self.assertEqual(resp.status_code, 404)
            self.assertEqual(resp.text, "CUSTOM 404")
            resp = self.url_open("/test_website/200/xx-100", allow_redirects=False)
            self.assertEqual(resp.status_code, 308)
            self.assertEqual(resp.headers.get('Location'), self.base_url + "/test_website/308/xx-100")
            resp = self.url_open("/test_website/308/xx-100", allow_redirects=False)
            self.assertEqual(resp.status_code, 404)
            self.assertEqual(resp.text, "CUSTOM 404")

            # unpublished record: redirect still happens, then 403
            resp = self.url_open("/test_website/200/name-%s" % rec_unpublished.id, allow_redirects=False)
            self.assertEqual(resp.status_code, 308)
            self.assertEqual(resp.headers.get('Location'), self.base_url + "/test_website/308/name-%s" % rec_unpublished.id)
            resp = self.url_open("/test_website/308/name-%s" % rec_unpublished.id, allow_redirects=False)
            self.assertEqual(resp.status_code, 403)
            self.assertEqual(resp.text, "CUSTOM 403")
            resp = self.url_open("/test_website/200/xx-%s" % rec_unpublished.id, allow_redirects=False)
            self.assertEqual(resp.status_code, 308)
            self.assertEqual(resp.headers.get('Location'), self.base_url + "/test_website/308/xx-%s" % rec_unpublished.id)
            resp = self.url_open("/test_website/308/xx-%s" % rec_unpublished.id, allow_redirects=False)
            self.assertEqual(resp.status_code, 403)
            self.assertEqual(resp.text, "CUSTOM 403")

            # with seo_name as slug
            rec_published.seo_name = "seo_name"
            rec_unpublished.seo_name = "seo_name"
            resp = self.url_open("/test_website/200/seo-name-%s" % rec_published.id, allow_redirects=False)
            self.assertEqual(resp.status_code, 308)
            self.assertEqual(resp.headers.get('Location'), self.base_url + "/test_website/308/seo-name-%s" % rec_published.id)
            resp = self.url_open("/test_website/308/seo-name-%s" % rec_published.id, allow_redirects=False)
            self.assertEqual(resp.status_code, 200)
            resp = self.url_open("/test_website/200/xx-%s" % rec_unpublished.id, allow_redirects=False)
            self.assertEqual(resp.status_code, 308)
            self.assertEqual(resp.headers.get('Location'), self.base_url + "/test_website/308/xx-%s" % rec_unpublished.id)
            resp = self.url_open("/test_website/308/xx-%s" % rec_unpublished.id, allow_redirects=False)
            self.assertEqual(resp.status_code, 403)
            self.assertEqual(resp.text, "CUSTOM 403")
            resp = self.url_open("/test_website/200/xx-100", allow_redirects=False)
            self.assertEqual(resp.status_code, 308)
            self.assertEqual(resp.headers.get('Location'), self.base_url + "/test_website/308/xx-100")
            resp = self.url_open("/test_website/308/xx-100", allow_redirects=False)
            self.assertEqual(resp.status_code, 404)
            self.assertEqual(resp.text, "CUSTOM 404")

    def test_03_redirect_308_qs(self):
        """Query strings must be carried over (once) through 308 rewrites
        applied by url_for() inside rendered pages."""
        self.env['website.rewrite'].create({
            'name': 'Test QS Redirect',
            'redirect_type': '308',
            'url_from': '/empty_controller_test',
            'url_to': '/empty_controller_test_redirected',
        })
        r = self.url_open('/test_website/test_redirect_view_qs?a=a')
        self.assertEqual(r.status_code, 200)
        self.assertIn(
            'href="/empty_controller_test_redirected?a=a"', r.text,
            "Redirection should have been applied, and query string should not have been duplicated.",
        )

    @mute_logger('odoo.addons.http_routing.models.ir_http')  # mute 403 warning
    def test_04_redirect_301_route_unpublished_record(self):
        # 1. Accessing published record: Normal case, expecting 200
        rec1 = self.env['test.model'].create({
            'name': '301 test record',
            'is_published': True,
        })
        url_rec1 = '/test_website/200/' + slug(rec1)
        r = self.url_open(url_rec1)
        self.assertEqual(r.status_code, 200)

        # 2. Accessing unpublished record: expecting 403 by default
        rec1.is_published = False
        r = self.url_open(url_rec1)
        self.assertEqual(r.status_code, 403)

        # 3. Accessing unpublished record with redirect to a 404: expecting 404
        redirect = self.env['website.rewrite'].create({
            'name': 'Test 301 Redirect route unpublished record',
            'redirect_type': '301',
            'url_from': url_rec1,
            'url_to': '/404',
        })
        r = self.url_open(url_rec1)
        self.assertEqual(r.status_code, 404)

        # 4. Accessing unpublished record with redirect to another published
        #    record: expecting redirect to that record
        rec2 = rec1.copy({'is_published': True})
        url_rec2 = '/test_website/200/' + slug(rec2)
        redirect.url_to = url_rec2
        r = self.url_open(url_rec1)
        self.assertEqual(r.status_code, 200)
        self.assertTrue(
            r.url.endswith(url_rec2),
            "Unpublished record should redirect to published record set in redirect")
| 49.751174 | 10,597 |
1,599 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo.tests
@odoo.tests.common.tagged('post_install', '-at_install')
class TestWebsiteControllerArgs(odoo.tests.HttpCase):
    """Check that controllers only receive the arguments they declare."""

    def test_crawl_args(self):
        # Each case maps a URL (converter args + query string) to the JSON
        # payload the controller is expected to receive.
        cases = [
            ('/ignore_args/converter/valueA/?b=valueB&c=valueC',
             {'a': 'valueA', 'b': 'valueB', 'kw': {'c': 'valueC'}}),
            ('/ignore_args/converter/valueA/nokw?b=valueB&c=valueC',
             {'a': 'valueA', 'b': 'valueB'}),
            ('/ignore_args/converteronly/valueA/?b=valueB&c=valueC',
             {'a': 'valueA', 'kw': None}),
            ('/ignore_args/none?a=valueA&b=valueB',
             {'a': None, 'kw': None}),
            ('/ignore_args/a?a=valueA&b=valueB',
             {'a': 'valueA', 'kw': None}),
            ('/ignore_args/kw?a=valueA&b=valueB',
             {'a': 'valueA', 'kw': {'b': 'valueB'}}),
        ]
        for url, expected_payload in cases:
            req = self.url_open(url)
            self.assertEqual(req.status_code, 200)
            self.assertEqual(req.json(), expected_payload)

        req = self.url_open('/test_website/country/whatever-999999')
        self.assertEqual(req.status_code, 404,
                         "Model converter record does not exist, return a 404.")
| 45.685714 | 1,599 |
431 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo.tests
from odoo.tools import mute_logger
@odoo.tests.common.tagged('post_install', '-at_install')
class TestCustomSnippet(odoo.tests.HttpCase):
    """Run the client-side tour covering custom (saved) snippets."""

    @mute_logger('odoo.addons.http_routing.models.ir_http', 'odoo.http')
    def test_01_run_tour(self):
        # Tour runs as admin on the homepage; loggers muted to silence
        # expected routing noise.
        self.start_tour("/", 'test_custom_snippet', login="admin")
| 33.153846 | 431 |
595 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import HttpCase, tagged
@tagged('post_install', '-at_install')
class TestMultiCompany(HttpCase):

    def test_company_in_context(self):
        """ Test website company is set in context """
        new_company = self.env['res.company'].create({'name': "Adaa"})
        default_website = self.env.ref('website.default_website')
        default_website.company_id = new_company
        # The test controller echoes the company ids from the request context.
        response = self.url_open('/multi_company_website')
        self.assertEqual(response.json()[0], new_company.id)
| 37.1875 | 595 |
239 |
py
|
PYTHON
|
15.0
|
import odoo.tests
from odoo.tools import mute_logger
@odoo.tests.common.tagged('post_install', '-at_install')
class TestWebsiteSession(odoo.tests.HttpCase):
    """Frontend session behaviour tests."""

    def test_01_run_test(self):
        # Client-side tour authenticating through the JSON endpoint.
        self.start_tour('/', 'test_json_auth')
| 26.555556 | 239 |
6,099 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
import odoo.tests
from odoo.tools import mute_logger
def break_view(view, fr='<p>placeholder</p>', to='<p t-field="no_record.exist"/>'):
    """Substitute *fr* with *to* in the view's arch; the default *to*
    references a non-existent record so rendering the view will crash."""
    broken_arch = view.arch.replace(fr, to)
    view.arch = broken_arch
@odoo.tests.common.tagged('post_install', '-at_install')
class TestWebsiteResetViews(odoo.tests.HttpCase):
def fix_it(self, page, mode='soft'):
    """Open a broken *page* (expects a 500 with a reset button), reset its
    template through /website/reset_template, and expect a 200."""
    self.authenticate("admin", "admin")
    broken = self.url_open(page)
    self.assertEqual(broken.status_code, 500, "Waiting 500")
    self.assertTrue('<button data-mode="soft" class="reset_templates_button' in broken.text)
    reset_payload = {'view_id': self.find_template(broken), 'redirect': page, 'mode': mode}
    fixed = self.url_open('/website/reset_template', reset_payload)
    self.assertEqual(fixed.status_code, 200, "Waiting 200")
def find_template(self, response):
    """Extract the hidden ``view_id`` input value from the rendered error
    page, or ``None`` when it is absent."""
    match = re.search(r'<input.*type="hidden".*name="view_id".*value="([0-9]+)?"', response.text)
    if not match:
        return match
    return match.group(1)
def setUp(self):
    super(TestWebsiteResetViews, self).setUp()
    # Shortcuts reused by every test in this class.
    self.Website = self.env['website']
    self.View = self.env['ir.ui.view']
    # Generic (non website-specific) test view served by the /test_view route.
    self.test_view = self.Website.viewref('test_website.test_view')
@mute_logger('odoo.addons.http_routing.models.ir_http')
def test_01_reset_specific_page_view(self):
    """Resetting a broken website-specific (COW) page view must restore
    the working page."""
    self.test_page_view = self.Website.viewref('test_website.test_page_view')
    total_views = self.View.search_count([('type', '=', 'qweb')])
    # Trigger COW then break the QWEB XML on it
    break_view(self.test_page_view.with_context(website_id=1))
    self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view")
    self.fix_it('/test_page_view')
@mute_logger('odoo.addons.http_routing.models.ir_http')
def test_02_reset_specific_view_controller(self):
total_views = self.View.search_count([('type', '=', 'qweb')])
# Trigger COW then break the QWEB XML on it
# `t-att-data="no_record.exist"` will test the case where exception.html contains branding
break_view(self.test_view.with_context(website_id=1), to='<p t-att-data="no_record.exist" />')
self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view")
self.fix_it('/test_view')
@mute_logger('odoo.addons.http_routing.models.ir_http')
def test_03_reset_specific_view_controller_t_called(self):
self.test_view_to_be_t_called = self.Website.viewref('test_website.test_view_to_be_t_called')
total_views = self.View.search_count([('type', '=', 'qweb')])
# Trigger COW then break the QWEB XML on it
break_view(self.test_view_to_be_t_called.with_context(website_id=1))
break_view(self.test_view, to='<t t-call="test_website.test_view_to_be_t_called"/>')
self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view")
self.fix_it('/test_view')
@mute_logger('odoo.addons.http_routing.models.ir_http')
def test_04_reset_specific_view_controller_inherit(self):
self.test_view_child_broken = self.Website.viewref('test_website.test_view_child_broken')
# Activate and break the inherited view
self.test_view_child_broken.active = True
break_view(self.test_view_child_broken.with_context(website_id=1, load_all_views=True))
self.fix_it('/test_view')
# This test work in real life, but not in test mode since we cannot rollback savepoint.
# @mute_logger('odoo.addons.http_routing.models.ir_http', 'odoo.addons.website.models.ir_ui_view')
# def test_05_reset_specific_view_controller_broken_request(self):
# total_views = self.View.search_count([('type', '=', 'qweb')])
# # Trigger COW then break the QWEB XML on it
# break_view(self.test_view.with_context(website_id=1), to='<t t-esc="request.env[\'website\'].browse(\'a\').name" />')
# self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view (1)")
# self.fix_it('/test_view')
# also mute ir.ui.view as `get_view_id()` will raise "Could not find view object with xml_id 'no_record.exist'""
@mute_logger('odoo.addons.http_routing.models.ir_http', 'odoo.addons.website.models.ir_ui_view')
def test_06_reset_specific_view_controller_inexisting_template(self):
total_views = self.View.search_count([('type', '=', 'qweb')])
# Trigger COW then break the QWEB XML on it
break_view(self.test_view.with_context(website_id=1), to='<t t-call="no_record.exist"/>')
self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view (2)")
self.fix_it('/test_view')
@mute_logger('odoo.addons.http_routing.models.ir_http')
def test_07_reset_page_view_complete_flow(self):
self.start_tour("/", 'test_reset_page_view_complete_flow_part1', login="admin")
self.fix_it('/test_page_view')
self.start_tour("/", 'test_reset_page_view_complete_flow_part2', login="admin")
self.fix_it('/test_page_view')
@mute_logger('odoo.addons.http_routing.models.ir_http')
def test_08_reset_specific_page_view_hard_mode(self):
self.test_page_view = self.Website.viewref('test_website.test_page_view')
total_views = self.View.search_count([('type', '=', 'qweb')])
# Trigger COW then break the QWEB XML on it
break_view(self.test_page_view.with_context(website_id=1))
# Break it again to have a previous arch different than file arch
break_view(self.test_page_view.with_context(website_id=1))
self.assertEqual(total_views + 1, self.View.search_count([('type', '=', 'qweb')]), "Missing COW view")
with self.assertRaises(AssertionError):
# soft reset should not be able to reset the view as previous
# version is also broken
self.fix_it('/test_page_view')
self.fix_it('/test_page_view', 'hard')
| 54.945946 | 6,099 |
2,601 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.web_editor.controllers.main import Web_Editor
from odoo.addons.web_unsplash.controllers.main import Web_Unsplash
import odoo.tests
from odoo import http
from odoo.tools import config
# Base URL of the local test HTTP server; used to serve local stand-ins for
# remote Unsplash assets so no external request is made during tests.
BASE_URL = "http://127.0.0.1:%s" % (config["http_port"],)


@odoo.tests.common.tagged('post_install', '-at_install')
class TestImageUploadProgress(odoo.tests.HttpCase):
    """Tours checking the image upload progress UI, with Unsplash mocked out."""

    def test_01_image_upload_progress(self):
        self.start_tour("/test_image_progress", 'test_image_upload_progress', login="admin")

    def test_02_image_upload_progress_unsplash(self):
        # Replacement controllers: return canned data instead of calling
        # third-party services (Unsplash / media library).
        def media_library_search(self, **params):
            return {"results": 0, "media": []}

        def fetch_unsplash_images(self, **post):
            return {
                'total': 1434,
                'total_pages': 48,
                'results': [{
                    'id': 'HQqIOc8oYro',
                    'alt_description': 'brown fox sitting on green grass field during daytime',
                    'urls': {
                        # 'regular': 'https://images.unsplash.com/photo-1462953491269-9aff00919695?crop=entropy&cs=tinysrgb&fit=max&fm=jpg&ixid=MnwzMDUwOHwwfDF8c2VhcmNofDF8fGZveHxlbnwwfHx8fDE2MzEwMzIzNDE&ixlib=rb-1.2.1&q=80&w=1080',
                        'regular': BASE_URL + '/website/static/src/img/phone.png',
                    },
                    'links': {
                        # 'download_location': 'https://api.unsplash.com/photos/HQqIOc8oYro/download?ixid=MnwzMDUwOHwwfDF8c2VhcmNofDF8fGZveHxlbnwwfHx8fDE2MzEwMzIzNDE'
                        'download_location': BASE_URL + '/website/static/src/img/phone.png',
                    },
                    'user': {
                        'name': 'Mitchell Admin',
                        'links': {
                            'html': BASE_URL,
                        },
                    },
                }]
            }
        # because not preprocessed by ControllerType metaclass
        fetch_unsplash_images.routing_type = 'json'
        Web_Unsplash.fetch_unsplash_images = http.route("/web_unsplash/fetch_images", type='json', auth="user")(fetch_unsplash_images)

        # disable undraw, no third party should be called in tests
        media_library_search.routing_type = 'json'
        Web_Editor.media_library_search = http.route(['/web_editor/media_library_search'], type='json', auth="user", website=True)(media_library_search)

        self.start_tour("/", 'test_image_upload_progress_unsplash', login="admin")
| 45.631579 | 2,601 |
429 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.website.tests.test_performance import UtilPerf
class TestPerformance(UtilPerf):
    """SQL query-count budget for a minimal website controller."""

    def test_10_perf_sql_website_controller_minimalist(self):
        # A bare controller must cost exactly one SQL query, with the page
        # cache both enabled and disabled.
        url = '/empty_controller_test'
        self.assertEqual(self._get_url_hot_query(url), 1)
        self.assertEqual(self._get_url_hot_query(url, cache=False), 1)
| 39 | 429 |
770 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo.tests
from odoo.tools import mute_logger
@odoo.tests.common.tagged('post_install', '-at_install')
class TestMedia(odoo.tests.HttpCase):
    """Website editor tours around media (image) replacement and linking."""

    @mute_logger('odoo.addons.http_routing.models.ir_http', 'odoo.http')
    def test_01_replace_media(self):
        # Minimal 1x1 base64-encoded GIF used as a selectable attachment.
        GIF = b"R0lGODdhAQABAIAAAP///////ywAAAAAAQABAAACAkQBADs="
        self.env['ir.attachment'].create({
            'name': 'sample.gif',
            'public': True,
            'mimetype': 'image/gif',
            'datas': GIF,
        })
        self.start_tour("/", 'test_replace_media', login="admin")

    def test_02_image_link(self):
        self.start_tour("/", 'test_image_link', login="admin")
| 33.478261 | 770 |
4,497 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import psycopg2
from odoo.addons.website.controllers.main import Website
from odoo.addons.website.tools import MockRequest
import odoo.tests
from odoo.tests.common import TransactionCase
_logger = logging.getLogger(__name__)
@odoo.tests.tagged('-at_install', 'post_install')
class TestAutoComplete(TransactionCase):
    """Exercise website search autocomplete, including fuzzy (trigram) matching."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.website = cls.env['website'].browse(1)
        cls.WebsiteController = Website()

    def _autocomplete(self, term, expected_count, expected_fuzzy_term):
        """ Calls the autocomplete for a given term and performs general checks """
        with MockRequest(self.env, website=self.website):
            suggestions = self.WebsiteController.autocomplete(
                search_type="test", term=term, max_nb_chars=50, options={},
            )
            self.assertEqual(expected_count, suggestions['results_count'], "Wrong number of suggestions")
            self.assertEqual(expected_fuzzy_term, suggestions.get('fuzzy_search', 'Not found'), "Wrong fuzzy match")

    def test_01_many_records(self):
        # REF1000~REF3999
        data = [{
            'name': 'REF%s' % count,
            'is_published': True,
        } for count in range(1000, 4000)]
        self.env['test.model'].create(data)
        # NUM1000~NUM1998
        data = [{
            'name': 'NUM%s' % count,
            'is_published': True,
        } for count in range(1000, 1999)]
        self.env['test.model'].create(data)
        # There are more than 1000 "R*" records
        # => Find exact match through the fallback
        self._autocomplete('REF3000', 1, False)
        # => No exact match => Find fuzzy within first 1000 (distance=3: replace D by F, move 3, add 1)
        self._autocomplete('RED3000', 1, 'ref3000' if self.env.registry.has_trigram else 'ref1003')
        # => Find exact match through the fallback
        self._autocomplete('REF300', 10, False)
        # => Find exact match through the fallback
        self._autocomplete('REF1', 1000, False)
        # => No exact match => Nothing close enough (min distance=5)
        self._autocomplete('REFX', 0, "Not found")
        # => Find exact match through the fallback - unfortunate because already in the first 1000 records
        self._autocomplete('REF1230', 1, False)
        # => Find exact match through the fallback
        self._autocomplete('REF2230', 1, False)
        # There are less than 1000 "N*" records
        # => Fuzzy within N* (distance=1: add 1)
        self._autocomplete('NUM000', 1, "num1000")
        # => Exact match (distance=0 shortcut logic)
        self._autocomplete('NUM100', 10, False)
        # => Exact match (distance=0 shortcut logic)
        self._autocomplete('NUM199', 9, False)
        # => Exact match (distance=0 shortcut logic)
        self._autocomplete('NUM1998', 1, False)
        # => Fuzzy within N* (distance=1: replace 1 by 9)
        self._autocomplete('NUM1999', 1, 'num1199')
        # => Fuzzy within N* (distance=1: add 1)
        self._autocomplete('NUM200', 1, 'num1200')
        # There are no "X*" records
        self._autocomplete('XEF1000', 0, "Not found")

    def test_02_pages_search(self):
        # Ensure the pg_trgm extension is available; skip silently when the
        # database user lacks the permission to install it.
        if not self.env.registry.has_trigram:
            try:
                self.env.cr.execute("CREATE EXTENSION IF NOT EXISTS pg_trgm")
                self.env.registry.has_trigram = True
            except psycopg2.Error:
                _logger.warning("pg_trgm extension can't be installed, which is required to run this test")
                return
        with MockRequest(self.env, website=self.env['website'].browse(1)):
            # This should not crash. This ensures that when searching on `name`
            # field of `website.page` model, it works properly when `pg_trgm` is
            # activated.
            # Indeed, `name` is a field of `website.page` record but only at the
            # ORM level, not in SQL, due to how `inherits` works.
            self.env['website'].browse(1)._search_with_fuzzy(
                'pages', 'test', limit=5, order='name asc, website_id desc, id', options={
                    'displayDescription': False, 'displayDetail': False,
                    'displayExtraDetail': False, 'displayExtraLink': False,
                    'displayImage': False, 'allowFuzzy': True
                }
            )
| 45.887755 | 4,497 |
329 |
py
|
PYTHON
|
15.0
|
import odoo.tests
from odoo.tools import mute_logger
@odoo.tests.common.tagged('post_install', '-at_install')
class TestWebsiteError(odoo.tests.HttpCase):
    """Tour checking the rendering of the website error (500) page."""

    @mute_logger('odoo.addons.http_routing.models.ir_http', 'odoo.http')
    def test_01_run_test(self):
        # The tour visits a deliberately broken view and inspects the error page.
        self.start_tour("/test_error_view", 'test_error_website')
| 32.9 | 329 |
973 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class TestModel(models.Model):
    """ Add website option in server actions. """
    _name = 'test.model'
    _inherit = [
        'website.seo.metadata',
        'website.published.mixin',
        'website.searchable.mixin',
    ]
    _description = 'Website Model Test'

    name = fields.Char(required=1)

    @api.model
    def _search_get_detail(self, website, order, options):
        """Describe how the website search engine should query this model."""
        # Both search results' label and URL are derived from the `name` field.
        field_mapping = {
            'name': {'name': 'name', 'type': 'text', 'match': True},
            'website_url': {'name': 'name', 'type': 'text', 'truncate': False},
        }
        detail = {
            'model': 'test.model',
            'base_domain': [],
            'search_fields': ['name'],
            'fetch_fields': ['name'],
            'mapping': field_mapping,
            'icon': 'fa-check-square-o',
            'order': 'name asc, id desc',
        }
        return detail
| 29.484848 | 973 |
468 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
class Website(models.Model):
    _inherit = "website"

    def _search_get_details(self, search_type, order, options):
        """Extend website search with the 'test' search type (test.model records)."""
        details = super()._search_get_details(search_type, order, options)
        if search_type == 'test':
            test_detail = self.env['test.model']._search_get_detail(self, order, options)
            details.append(test_detail)
        return details
| 33.428571 | 468 |
7,007 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
import werkzeug
from odoo import http
from odoo.http import request
from odoo.addons.portal.controllers.web import Home
from odoo.exceptions import UserError, ValidationError, AccessError, MissingError, AccessDenied
class WebsiteTest(Home):
    """Test controllers covering routing, argument handling, sessions,
    error propagation, HTTP methods, performance and redirects.

    Each route exists only to give the test_website test suite a
    predictable endpoint; none of them carries business logic.
    """

    @http.route('/test_view', type='http', auth='public', website=True, sitemap=False)
    def test_view(self, **kwargs):
        return request.render('test_website.test_view')

    @http.route('/ignore_args/converteronly/<string:a>', type='http', auth="public", website=True, sitemap=False)
    def test_ignore_args_converter_only(self, a):
        return request.make_response(json.dumps(dict(a=a, kw=None)))

    @http.route('/ignore_args/none', type='http', auth="public", website=True, sitemap=False)
    def test_ignore_args_none(self):
        return request.make_response(json.dumps(dict(a=None, kw=None)))

    @http.route('/ignore_args/a', type='http', auth="public", website=True, sitemap=False)
    def test_ignore_args_a(self, a):
        return request.make_response(json.dumps(dict(a=a, kw=None)))

    @http.route('/ignore_args/kw', type='http', auth="public", website=True, sitemap=False)
    def test_ignore_args_kw(self, a, **kw):
        return request.make_response(json.dumps(dict(a=a, kw=kw)))

    @http.route('/ignore_args/converter/<string:a>', type='http', auth="public", website=True, sitemap=False)
    def test_ignore_args_converter(self, a, b='youhou', **kw):
        return request.make_response(json.dumps(dict(a=a, b=b, kw=kw)))

    @http.route('/ignore_args/converter/<string:a>/nokw', type='http', auth="public", website=True, sitemap=False)
    def test_ignore_args_converter_nokw(self, a, b='youhou'):
        return request.make_response(json.dumps(dict(a=a, b=b)))

    @http.route('/multi_company_website', type='http', auth="public", website=True, sitemap=False)
    def test_company_context(self):
        # Expose the allowed company ids of the current request context.
        return request.make_response(json.dumps(request.context.get('allowed_company_ids')))

    @http.route('/test_lang_url/<model("res.country"):country>', type='http', auth='public', website=True, sitemap=False)
    def test_lang_url(self, **kwargs):
        return request.render('test_website.test_view')

    # Test Session

    @http.route('/test_get_dbname', type='json', auth='public', website=True, sitemap=False)
    def test_get_dbname(self, **kwargs):
        return request.env.cr.dbname

    # Test Error

    @http.route('/test_error_view', type='http', auth='public', website=True, sitemap=False)
    def test_error_view(self, **kwargs):
        return request.render('test_website.test_error_view')

    @http.route('/test_user_error_http', type='http', auth='public', website=True, sitemap=False)
    def test_user_error_http(self, **kwargs):
        raise UserError("This is a user http test")

    @http.route('/test_user_error_json', type='json', auth='public', website=True, sitemap=False)
    def test_user_error_json(self, **kwargs):
        raise UserError("This is a user rpc test")

    @http.route('/test_validation_error_http', type='http', auth='public', website=True, sitemap=False)
    def test_validation_error_http(self, **kwargs):
        raise ValidationError("This is a validation http test")

    @http.route('/test_validation_error_json', type='json', auth='public', website=True, sitemap=False)
    def test_validation_error_json(self, **kwargs):
        raise ValidationError("This is a validation rpc test")

    @http.route('/test_access_error_json', type='json', auth='public', website=True, sitemap=False)
    def test_access_error_json(self, **kwargs):
        raise AccessError("This is an access rpc test")

    @http.route('/test_access_error_http', type='http', auth='public', website=True, sitemap=False)
    def test_access_error_http(self, **kwargs):
        raise AccessError("This is an access http test")

    @http.route('/test_missing_error_json', type='json', auth='public', website=True, sitemap=False)
    def test_missing_error_json(self, **kwargs):
        raise MissingError("This is a missing rpc test")

    @http.route('/test_missing_error_http', type='http', auth='public', website=True, sitemap=False)
    def test_missing_error_http(self, **kwargs):
        raise MissingError("This is a missing http test")

    @http.route('/test_internal_error_json', type='json', auth='public', website=True, sitemap=False)
    def test_internal_error_json(self, **kwargs):
        raise werkzeug.exceptions.InternalServerError()

    @http.route('/test_internal_error_http', type='http', auth='public', website=True, sitemap=False)
    def test_internal_error_http(self, **kwargs):
        raise werkzeug.exceptions.InternalServerError()

    @http.route('/test_access_denied_json', type='json', auth='public', website=True, sitemap=False)
    def test_denied_error_json(self, **kwargs):
        raise AccessDenied("This is an access denied rpc test")

    @http.route('/test_access_denied_http', type='http', auth='public', website=True, sitemap=False)
    def test_denied_error_http(self, **kwargs):
        raise AccessDenied("This is an access denied http test")

    @http.route(['/get'], type='http', auth="public", methods=['GET'], website=True, sitemap=False)
    def get_method(self, **kw):
        return request.make_response('get')

    @http.route(['/post'], type='http', auth="public", methods=['POST'], website=True, sitemap=False)
    def post_method(self, **kw):
        return request.make_response('post')

    @http.route(['/get_post'], type='http', auth="public", methods=['GET', 'POST'], website=True, sitemap=False)
    def get_post_method(self, **kw):
        return request.make_response('get_post')

    @http.route(['/get_post_nomultilang'], type='http', auth="public", methods=['GET', 'POST'], website=True, multilang=False, sitemap=False)
    def get_post_method_no_multilang(self, **kw):
        return request.make_response('get_post_nomultilang')

    # Test Perfs

    @http.route(['/empty_controller_test'], type='http', auth='public', website=True, multilang=False, sitemap=False)
    def empty_controller_test(self, **kw):
        return 'Basic Controller Content'

    # Test Redirects

    @http.route(['/test_website/country/<model("res.country"):country>'], type='http', auth="public", website=True, sitemap=False)
    def test_model_converter_country(self, country, **kw):
        return request.render('test_website.test_redirect_view', {'country': country})

    @http.route(['/test_website/200/<model("test.model"):rec>'], type='http', auth="public", website=True, sitemap=False)
    def test_model_converter_seoname(self, rec, **kw):
        return request.make_response('ok')

    @http.route(['/test_website/test_redirect_view_qs'], type='http', auth="public", website=True, sitemap=False)
    def test_redirect_view_qs(self, **kw):
        return request.render('test_website.test_redirect_view_qs')
| 48.659722 | 7,007 |
482 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
    'name': 'Authentication via LDAP',
    'depends': ['base', 'base_setup'],
    #'description': < auto-loaded from README file
    'category': 'Hidden/Tools',
    'data': [
        'views/ldap_installer_views.xml',
        'security/ir.model.access.csv',
        'views/res_config_settings_views.xml',
    ],
    # Requires the python-ldap package to be installed on the system.
    'external_dependencies': {
        'python': ['ldap'],
    },
    'license': 'LGPL-3',
}
| 28.352941 | 482 |
351 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class ResCompany(models.Model):
    _inherit = "res.company"

    # LDAP directory configurations for this company; restricted to system
    # administrators because the records hold bind credentials.
    ldaps = fields.One2many('res.company.ldap', 'company', string='LDAP Parameters',
        copy=True, groups="base.group_system")
| 31.909091 | 351 |
2,334 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.exceptions import AccessDenied
from odoo import api, models, registry, SUPERUSER_ID
class Users(models.Model):
    _inherit = "res.users"

    @classmethod
    def _login(cls, db, login, password, user_agent_env):
        """Fall back on LDAP when the standard login fails.

        The LDAP path is only tried when no local user row exists for the
        login; if a local user exists, the original AccessDenied is re-raised
        so LDAP cannot bypass a local password.
        """
        try:
            return super(Users, cls)._login(db, login, password, user_agent_env=user_agent_env)
        except AccessDenied as e:
            with registry(db).cursor() as cr:
                cr.execute("SELECT id FROM res_users WHERE lower(login)=%s", (login,))
                res = cr.fetchone()
                if res:
                    raise e
                env = api.Environment(cr, SUPERUSER_ID, {})
                Ldap = env['res.company.ldap']
                for conf in Ldap._get_ldap_dicts():
                    entry = Ldap._authenticate(conf, login, password)
                    if entry:
                        # Provision (or fetch) the local user mapped to the LDAP entry.
                        return Ldap._get_or_create_user(conf, login, entry)
                raise e

    def _check_credentials(self, password, env):
        """Accept an LDAP bind as a valid credential check for active users."""
        try:
            return super(Users, self)._check_credentials(password, env)
        except AccessDenied:
            # Password auth may be disabled in favor of API keys for RPC.
            passwd_allowed = env['interactive'] or not self.env.user._rpc_api_keys_only()
            if passwd_allowed and self.env.user.active:
                Ldap = self.env['res.company.ldap']
                for conf in Ldap._get_ldap_dicts():
                    if Ldap._authenticate(conf, self.env.user.login, password):
                        return
            raise

    @api.model
    def change_password(self, old_passwd, new_passwd):
        """Try changing the password on the LDAP server first; on success the
        local password is cleared so LDAP stays the single source of truth."""
        if new_passwd:
            Ldap = self.env['res.company.ldap']
            for conf in Ldap._get_ldap_dicts():
                changed = Ldap._change_password(conf, self.env.user.login, old_passwd, new_passwd)
                if changed:
                    uid = self.env.user.id
                    self._set_empty_password(uid)
                    self.invalidate_cache(['password'], [uid])
                    return True
        return super(Users, self).change_password(old_passwd, new_passwd)

    def _set_empty_password(self, uid):
        # Clear the stored local password for an LDAP-managed user.
        self.env.cr.execute(
            'UPDATE res_users SET password=NULL WHERE id=%s',
            (uid,)
        )
| 38.9 | 2,334 |
318 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class ResConfigSettings(models.TransientModel):
    _inherit = 'res.config.settings'

    # Surface the current company's LDAP configurations in General Settings.
    ldaps = fields.One2many(related='company_id.ldaps', string="LDAP Parameters", readonly=False)
| 31.8 | 318 |
9,469 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ldap
import logging
from ldap.filter import filter_format
from odoo import _, api, fields, models, tools
from odoo.exceptions import AccessDenied
from odoo.tools.misc import str2bool
from odoo.tools.pycompat import to_text
_logger = logging.getLogger(__name__)
class CompanyLDAP(models.Model):
    """Per-company LDAP directory configuration and the helpers used by
    res.users to authenticate, provision and change passwords against it."""
    _name = 'res.company.ldap'
    _description = 'Company LDAP configuration'
    _order = 'sequence'
    _rec_name = 'ldap_server'

    sequence = fields.Integer(default=10)
    company = fields.Many2one('res.company', string='Company', required=True, ondelete='cascade')
    ldap_server = fields.Char(string='LDAP Server address', required=True, default='127.0.0.1')
    ldap_server_port = fields.Integer(string='LDAP Server port', required=True, default=389)
    ldap_binddn = fields.Char('LDAP binddn',
        help="The user account on the LDAP server that is used to query the directory. "
             "Leave empty to connect anonymously.")
    ldap_password = fields.Char(string='LDAP password',
        help="The password of the user account on the LDAP server that is used to query the directory.")
    ldap_filter = fields.Char(string='LDAP filter', required=True, help="""\
    Filter used to look up user accounts in the LDAP database. It is an\
    arbitrary LDAP filter in string representation. Any `%s` placeholder\
    will be replaced by the login (identifier) provided by the user, the filter\
    should contain at least one such placeholder.

    The filter must result in exactly one (1) result, otherwise the login will\
    be considered invalid.

    Example (actual attributes depend on LDAP server and setup):

        (&(objectCategory=person)(objectClass=user)(sAMAccountName=%s))

    or

        (|(mail=%s)(uid=%s))
    """)
    ldap_base = fields.Char(string='LDAP base', required=True, help="DN of the user search scope: all descendants of this base will be searched for users.")
    user = fields.Many2one('res.users', string='Template User',
        help="User to copy when creating new users")
    create_user = fields.Boolean(default=True,
        help="Automatically create local user accounts for new users authenticating via LDAP")
    ldap_tls = fields.Boolean(string='Use TLS',
        help="Request secure TLS/SSL encryption when connecting to the LDAP server. "
             "This option requires a server with STARTTLS enabled, "
             "otherwise all authentication attempts will fail.")

    def _get_ldap_dicts(self):
        """
        Retrieve res_company_ldap resources from the database in dictionary
        format.

        :return: ldap configurations
        :rtype: list of dictionaries
        """
        ldaps = self.sudo().search([('ldap_server', '!=', False)], order='sequence')
        res = ldaps.read([
            'id',
            'company',
            'ldap_server',
            'ldap_server_port',
            'ldap_binddn',
            'ldap_password',
            'ldap_filter',
            'ldap_base',
            'user',
            'create_user',
            'ldap_tls'
        ])
        return res

    def _connect(self, conf):
        """
        Connect to an LDAP server specified by an ldap
        configuration dictionary.

        :param dict conf: LDAP configuration
        :return: an LDAP object
        """
        uri = 'ldap://%s:%d' % (conf['ldap_server'], conf['ldap_server_port'])
        connection = ldap.initialize(uri)
        # Optionally disable referral chasing (needed e.g. against Active Directory).
        ldap_chase_ref_disabled = self.env['ir.config_parameter'].sudo().get_param('auth_ldap.disable_chase_ref')
        if str2bool(ldap_chase_ref_disabled):
            connection.set_option(ldap.OPT_REFERRALS, ldap.OPT_OFF)
        if conf['ldap_tls']:
            connection.start_tls_s()
        return connection

    def _get_entry(self, conf, login):
        """Look up *login* in the directory; return (dn, entry) only when the
        configured filter matches exactly one entry, else (False, False)."""
        filter_tmpl = conf['ldap_filter']
        placeholders = filter_tmpl.count('%s')
        if not placeholders:
            _logger.warning("LDAP filter %r contains no placeholder ('%%s').", filter_tmpl)
        # filter_format escapes the login, preventing LDAP filter injection.
        formatted_filter = filter_format(filter_tmpl, [login] * placeholders)
        results = self._query(conf, formatted_filter)

        # Get rid of results (dn, attrs) without a dn
        results = [entry for entry in results if entry[0]]

        dn, entry = False, False
        if len(results) == 1:
            dn, _ = entry = results[0]
        return dn, entry

    def _authenticate(self, conf, login, password):
        """
        Authenticate a user against the specified LDAP server.

        In order to prevent an unintended 'unauthenticated authentication',
        which is an anonymous bind with a valid dn and a blank password,
        check for empty passwords explicitely (:rfc:`4513#section-6.3.1`)

        :param dict conf: LDAP configuration
        :param login: username
        :param password: Password for the LDAP user
        :return: LDAP entry of authenticated user or False
        :rtype: dictionary of attributes
        """
        if not password:
            return False

        dn, entry = self._get_entry(conf, login)
        if not dn:
            return False
        try:
            conn = self._connect(conf)
            conn.simple_bind_s(dn, to_text(password))
            conn.unbind()
        except ldap.INVALID_CREDENTIALS:
            return False
        except ldap.LDAPError as e:
            _logger.error('An LDAP exception occurred: %s', e)
            return False
        return entry

    def _query(self, conf, filter, retrieve_attributes=None):
        """
        Query an LDAP server with the filter argument and scope subtree.

        Allow for all authentication methods of the simple authentication
        method:

        - authenticated bind (non-empty binddn + valid password)
        - anonymous bind (empty binddn + empty password)
        - unauthenticated authentication (non-empty binddn + empty password)

        .. seealso::
           :rfc:`4513#section-5.1` - LDAP: Simple Authentication Method.

        :param dict conf: LDAP configuration
        :param filter: valid LDAP filter
        :param list retrieve_attributes: LDAP attributes to be retrieved. \
        If not specified, return all attributes.
        :return: ldap entries
        :rtype: list of tuples (dn, attrs)
        """
        results = []
        try:
            conn = self._connect(conf)
            ldap_password = conf['ldap_password'] or ''
            ldap_binddn = conf['ldap_binddn'] or ''
            conn.simple_bind_s(to_text(ldap_binddn), to_text(ldap_password))
            results = conn.search_st(to_text(conf['ldap_base']), ldap.SCOPE_SUBTREE, filter, retrieve_attributes, timeout=60)
            conn.unbind()
        except ldap.INVALID_CREDENTIALS:
            _logger.error('LDAP bind failed.')
        except ldap.LDAPError as e:
            _logger.error('An LDAP exception occurred: %s', e)
        return results

    def _map_ldap_attributes(self, conf, login, ldap_entry):
        """
        Compose values for a new resource of model res_users,
        based upon the retrieved ldap entry and the LDAP settings.

        :param dict conf: LDAP configuration
        :param login: the new user's login
        :param tuple ldap_entry: single LDAP result (dn, attrs)
        :return: parameters for a new resource of model res_users
        :rtype: dict
        """
        return {
            'name': tools.ustr(ldap_entry[1]['cn'][0]),
            'login': login,
            'company_id': conf['company'][0]
        }

    def _get_or_create_user(self, conf, login, ldap_entry):
        """
        Retrieve an active resource of model res_users with the specified
        login. Create the user if it is not initially found.

        :param dict conf: LDAP configuration
        :param login: the user's login
        :param tuple ldap_entry: single LDAP result (dn, attrs)
        :return: res_users id
        :rtype: int
        """
        login = tools.ustr(login.lower().strip())
        self.env.cr.execute("SELECT id, active FROM res_users WHERE lower(login)=%s", (login,))
        res = self.env.cr.fetchone()
        if res:
            if res[1]:
                return res[0]
        elif conf['create_user']:
            _logger.debug("Creating new Odoo user \"%s\" from LDAP" % login)
            values = self._map_ldap_attributes(conf, login, ldap_entry)
            SudoUser = self.env['res.users'].sudo().with_context(no_reset_password=True)
            if conf['user']:
                # Copy the configured template user instead of creating from scratch.
                values['active'] = True
                return SudoUser.browse(conf['user'][0]).copy(default=values).id
            else:
                return SudoUser.create(values).id
        raise AccessDenied(_("No local user found for LDAP login and not configured to create one"))

    def _change_password(self, conf, login, old_passwd, new_passwd):
        """Change the user's password on the LDAP server; return True on success."""
        changed = False
        dn, entry = self._get_entry(conf, login)
        if not dn:
            return False
        try:
            conn = self._connect(conf)
            # Bind as the user themselves: proves the old password is valid.
            conn.simple_bind_s(dn, to_text(old_passwd))
            conn.passwd_s(dn, old_passwd, new_passwd)
            changed = True
            conn.unbind()
        except ldap.INVALID_CREDENTIALS:
            pass
        except ldap.LDAPError as e:
            _logger.error('An LDAP exception occurred: %s', e)
        return changed
| 38.336032 | 9,469 |
1,539 |
py
|
PYTHON
|
15.0
|
{
'name': 'Base import',
'description': """
New extensible file import for Odoo
======================================
Re-implement Odoo's file import system:
* Server side, the previous system forces most of the logic into the
client which duplicates the effort (between clients), makes the
import system much harder to use without a client (direct RPC or
other forms of automation) and makes knowledge about the
import/export system much harder to gather as it is spread over
3+ different projects.
* In a more extensible manner, so users and partners can build their
own front-end to import from other file formats (e.g. OpenDocument
files) which may be simpler to handle in their work flow or from
their data production sources.
* In a module, so that administrators and users of Odoo who do not
need or want an online import can avoid it being available to users.
""",
'depends': ['web'],
'version': '2.0',
'category': 'Hidden/Tools',
'installable': True,
'auto_install': True,
'data': [
'security/ir.model.access.csv',
],
'assets': {
'web.assets_qweb': [
'base_import/static/src/**/*.xml',
],
'web.assets_backend': [
'base_import/static/lib/javascript-state-machine/state-machine.js',
'base_import/static/src/**/*.scss',
'base_import/static/src/**/*.js',
],
'web.qunit_suite_tests': [
'base_import/static/tests/**/*',
],
},
'license': 'LGPL-3',
}
| 33.456522 | 1,539 |
38,208 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import difflib
import io
import pprint
import unittest
from odoo.tests.common import TransactionCase, can_import
from odoo.modules.module import get_module_resource
from odoo.tools import mute_logger, pycompat
from odoo.addons.base_import.models.base_import import ImportValidationError
# Description of the synthetic "External ID" column exposed by every import mapping.
ID_FIELD = {
    'id': 'id',
    'name': 'id',
    'string': "External ID",
    'required': False,
    'fields': [],
    'type': 'id',
}


def make_field(name='value', string='Value', required=False, fields=None, field_type='id', model_name=None, comodel_name=None):
    """Build the expected field tree for a model with a single field.

    Returns a two-element list: the shared ``ID_FIELD`` entry followed by a
    field description assembled from the arguments. ``model_name`` and
    ``comodel_name`` keys are present only when a truthy value is given.
    """
    described = {
        'id': name,
        'name': name,
        'string': string,
        'required': required,
        'fields': [] if fields is None else fields,
        'type': field_type,
    }
    if model_name:
        described['model_name'] = model_name
    if comodel_name:
        described['comodel_name'] = comodel_name
    return [ID_FIELD, described]
def sorted_fields(fields):
    """Return a copy of *fields* recursively sorted by ``'id'``.

    Each descriptor is shallow-copied (the input list is never mutated),
    its nested ``'fields'`` list is normalized the same way, and siblings
    are ordered by their ``'id'`` key so two trees compare order-free.
    """
    normalized = []
    for field in fields:
        clone = dict(field)
        clone['fields'] = sorted_fields(clone['fields'])
        normalized.append(clone)
    normalized.sort(key=lambda descriptor: descriptor['id'])
    return normalized
class BaseImportCase(TransactionCase):
    """Test-case base class comparing field trees order-insensitively."""

    def assertEqualFields(self, fields1, fields2):
        """Assert both field trees are equal after recursive sorting,
        failing with a unified diff of their pretty-printed forms."""
        left = sorted_fields(fields1)
        right = sorted_fields(fields2)
        diff_lines = difflib.unified_diff(
            pprint.pformat(left).splitlines(),
            pprint.pformat(right).splitlines(),
        )
        assert left == right, '\n'.join(diff_lines)
class TestBasicFields(BaseImportCase):
    """Check which fields get_fields_tree() exposes for simple test models."""
    def get_fields(self, field):
        # Shorthand: importable field tree for a 'base_import.tests.models.*' model.
        return self.env['base_import.import'].get_fields_tree('base_import.tests.models.' + field)
    def test_base(self):
        """ A basic field is not required """
        self.assertEqualFields(self.get_fields('char'), make_field(field_type='char', model_name='base_import.tests.models.char'))
    def test_required(self):
        """ Required fields should be flagged (so they can be fill-required) """
        self.assertEqualFields(self.get_fields('char.required'), make_field(required=True, field_type='char', model_name='base_import.tests.models.char.required'))
    def test_readonly(self):
        """ Readonly fields should be filtered out"""
        # Only the implicit external-id column remains.
        self.assertEqualFields(self.get_fields('char.readonly'), [ID_FIELD])
    def test_readonly_states(self):
        """ Readonly fields with states should not be filtered out"""
        self.assertEqualFields(self.get_fields('char.states'), make_field(field_type='char', model_name='base_import.tests.models.char.states'))
    def test_readonly_states_noreadonly(self):
        """ Readonly fields with states having nothing to do with
        readonly should still be filtered out"""
        self.assertEqualFields(self.get_fields('char.noreadonly'), [ID_FIELD])
    def test_readonly_states_stillreadonly(self):
        """ Readonly fields with readonly states leaving them readonly
        always... filtered out"""
        self.assertEqualFields(self.get_fields('char.stillreadonly'), [ID_FIELD])
    def test_m2o(self):
        """ M2O fields should allow import of themselves (name_get),
        their id and their xid"""
        # Sub-fields: external id ('id') and database id ('.id').
        self.assertEqualFields(self.get_fields('m2o'), make_field(
            field_type='many2one', comodel_name='base_import.tests.models.m2o.related', model_name='base_import.tests.models.m2o',
            fields=[
                {'id': 'value', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': [], 'type': 'id', 'model_name': 'base_import.tests.models.m2o'},
                {'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': False, 'fields': [], 'type': 'id', 'model_name': 'base_import.tests.models.m2o'},
            ]))
    def test_m2o_required(self):
        """ If an m2o field is required, its three sub-fields are
        required as well (the client has to handle that: requiredness
        is id-based)
        """
        self.assertEqualFields(self.get_fields('m2o.required'), make_field(
            field_type='many2one', required=True, comodel_name='base_import.tests.models.m2o.required.related', model_name='base_import.tests.models.m2o.required',
            fields=[
                {'id': 'value', 'name': 'id', 'string': 'External ID', 'required': True, 'fields': [], 'type': 'id', 'model_name': 'base_import.tests.models.m2o.required'},
                {'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': True, 'fields': [], 'type': 'id', 'model_name': 'base_import.tests.models.m2o.required'},
            ]))
class TestO2M(BaseImportCase):
    """Field-tree shape for a one2many field and its child model."""
    def get_fields(self, field):
        # Shorthand: importable field tree for a 'base_import.tests.models.*' model.
        return self.env['base_import.import'].get_fields_tree('base_import.tests.models.' + field)
    def test_shallow(self):
        # The o2m 'value' field nests the child model's own field list,
        # including the back-reference m2o 'parent_id' with its id/.id sub-fields.
        self.assertEqualFields(
            self.get_fields('o2m'), [
                ID_FIELD,
                {'id': 'name', 'name': 'name', 'string': "Name", 'required': False, 'fields': [], 'type': 'char', 'model_name': 'base_import.tests.models.o2m'},
                {
                    'id': 'value', 'name': 'value', 'string': 'Value', 'model_name': 'base_import.tests.models.o2m',
                    'required': False, 'type': 'one2many', 'comodel_name': 'base_import.tests.models.o2m.child',
                    'fields': [
                        ID_FIELD,
                        {
                            'id': 'parent_id', 'name': 'parent_id', 'model_name': 'base_import.tests.models.o2m.child',
                            'string': 'Parent', 'type': 'many2one', 'comodel_name': 'base_import.tests.models.o2m',
                            'required': False, 'fields': [
                                {'id': 'parent_id', 'name': 'id', 'model_name': 'base_import.tests.models.o2m.child',
                                 'string': 'External ID', 'required': False,
                                 'fields': [], 'type': 'id'},
                                {'id': 'parent_id', 'name': '.id', 'model_name': 'base_import.tests.models.o2m.child',
                                 'string': 'Database ID', 'required': False,
                                 'fields': [], 'type': 'id'},
                            ]
                        },
                        {'id': 'value', 'name': 'value', 'string': 'Value',
                         'required': False, 'fields': [], 'type': 'integer', 'model_name': 'base_import.tests.models.o2m.child',
                         },
                    ]
                }
            ]
        )
class TestMatchHeadersSingle(TransactionCase):
    """_get_mapping_suggestion() for a single CSV header: exact name/string
    matches, fuzzy matches under the distance threshold, and 'path' headers
    (a/b) resolved recursively through sub-fields."""
    def test_match_by_name(self):
        # Header equals the field's technical name -> exact match, distance 0.
        match = self.env['base_import.import']._get_mapping_suggestion('f0', [{'name': 'f0'}], [], {})
        self.assertEqual(match, {'field_path': ['f0'], 'distance': 0})
    def test_match_by_string(self):
        # Header equals the field label (case-insensitively) -> exact match.
        match = self.env['base_import.import']._get_mapping_suggestion('some field', [{'name': 'bob', 'string': "Some Field"}], [], {})
        self.assertEqual(match, {'field_path': ['bob'], 'distance': 0})
    def test_nomatch(self):
        # Unrelated header -> empty suggestion.
        match = self.env['base_import.import']._get_mapping_suggestion('should not be', [{'name': 'bob', 'string': "wheee"}], [], {})
        self.assertEqual(match, {})
    def test_close_match(self):
        # Small typo -> fuzzy match with a non-zero distance.
        match = self.env['base_import.import']._get_mapping_suggestion('bobe', [{'name': 'bob', 'type': 'char'}], ['char'], {})
        self.assertEqual(match, {'field_path': ['bob'], 'distance': 0.1428571428571429})
    def test_distant_match(self):
        Import = self.env['base_import.import']
        header, field_string = 'same Folding', 'Some Field'
        match = Import._get_mapping_suggestion(header, [{'name': 'bob', 'string': field_string, 'type': 'char'}], ['char'], {})
        string_field_dist = Import._get_distance(header.lower(), field_string.lower())
        self.assertEqual(string_field_dist, 0.36363636363636365)
        self.assertEqual(match, {})  # if distance >= 0.2, no match returned
    def test_recursive_match(self):
        # 'f0/f1' matches field f0, then sub-field f1 inside it.
        f = {
            'name': 'f0',
            'string': "My Field",
            'fields': [
                {'name': 'f0', 'string': "Sub field 0", 'fields': []},
                {'name': 'f1', 'string': "Sub field 2", 'fields': []},
            ]
        }
        match = self.env['base_import.import']._get_mapping_suggestion('f0/f1', [f], [], {})
        self.assertEqual(match, {'field_path': [f['name'], f['fields'][1]['name']]})
    def test_recursive_nomatch(self):
        """ Match first level, fail to match second level
        """
        f = {
            'name': 'f0',
            'string': "My Field",
            'fields': [
                {'name': 'f0', 'string': "Sub field 0", 'fields': []},
                {'name': 'f1', 'string': "Sub field 2", 'fields': []},
            ]
        }
        match = self.env['base_import.import']._get_mapping_suggestion('f0/f2', [f], [], {})
        self.assertEqual(match, {})
class TestMatchHeadersMultiple(TransactionCase):
    """_get_mapping_suggestions() over a whole header row: unmatched
    headers map to None, matched ones to a field_path suggestion."""
    def test_noheaders(self):
        # No headers at all -> empty suggestion dict.
        self.assertEqual(
            self.env['base_import.import']._get_mapping_suggestions([], {}, []), {}
        )
    def test_nomatch(self):
        # No candidate fields supplied -> every header maps to None.
        self.assertEqual(
            self.env['base_import.import']._get_mapping_suggestions(
                ['foo', 'bar', 'baz', 'qux'],
                {
                    (0, 'foo'): ['int'],
                    (1, 'bar'): ['char'],
                    (2, 'baz'): ['text'],
                    (3, 'qux'): ['many2one']
                },
                {}),
            {
                (0, 'foo'): None,
                (1, 'bar'): None,
                (2, 'baz'): None,
                (3, 'qux'): None
            }
        )
    def test_mixed(self):
        # Mix of: no match (foo), name match (bar), label match (baz),
        # and a path header resolved through a sub-field (qux/corge).
        self.assertEqual(
            self.env['base_import.import']._get_mapping_suggestions(
                'foo bar baz qux/corge'.split(),
                {
                    (0, 'foo'): ['int'],
                    (1, 'bar'): ['char'],
                    (2, 'baz'): ['text'],
                    (3, 'qux/corge'): ['text']
                },
                [
                    {'name': 'bar', 'string': 'Bar', 'type': 'char'},
                    {'name': 'bob', 'string': 'Baz', 'type': 'text'},
                    {'name': 'qux', 'string': 'Qux', 'type': 'many2one', 'fields': [
                        {'name': 'corge', 'type': 'text', 'fields': []},
                    ]}
                ]),
            {
                (0, 'foo'): None,
                (1, 'bar'): {'field_path': ['bar'], 'distance': 0},
                (2, 'baz'): {'field_path': ['bob'], 'distance': 0},
                (3, 'qux/corge'): {'field_path': ['qux', 'corge']}
            }
        )
class TestColumnMapping(TransactionCase):
    """Persistence of column->field mappings and fuzzy-distance behavior."""
    def test_column_mapping(self):
        import_record = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file': u"Name,Some Value,value\n"
                    u"chhagan,10,1\n"
                    u"magan,20,2\n".encode('utf-8'),
            'file_type': 'text/csv',
            'file_name': 'data.csv',
        })
        # Last positional arg True -> dry run that still stores the mapping.
        import_record.execute_import(
            ['name', 'somevalue', 'othervalue'],
            ['Name', 'Some Value', 'value'],
            {'quoting': '"', 'separator': ',', 'has_headers': True},
            True
        )
        # The chosen header->field pairs must be persisted per model.
        fields = self.env['base_import.mapping'].search_read(
            [('res_model', '=', 'base_import.tests.models.preview')],
            ['column_name', 'field_name']
        )
        self.assertItemsEqual([f['column_name'] for f in fields], ['Name', 'Some Value', 'value'])
        self.assertItemsEqual([f['field_name'] for f in fields], ['somevalue', 'name', 'othervalue'])
    def test_fuzzy_match_distance(self):
        # Pairs of (field name, header); close pairs should match, far ones not.
        values_to_test = [
            ('opportunities', 'opportinuties'),
            ('opportunities', 'opportunate'),
            ('opportunities', 'operable'),
            ('opportunities', 'purchasing'),
            ('lead_id', 'laed_id'),
            ('lead_id', 'leen_id'),
            ('lead_id', 'let_id_be'),
            ('lead_id', 'not related'),
        ]
        Import = self.env['base_import.import']
        max_distance = 0.2  # see FUZZY_MATCH_DISTANCE. We don't use it here to avoid making test work after modifying this constant.
        for value in values_to_test:
            distance = Import._get_distance(value[0].lower(), value[1].lower())
            model_fields_info = [{'name': value[0], 'string': value[0], 'type': 'char'}]
            match = self.env['base_import.import']._get_mapping_suggestion(value[1], model_fields_info, ['char'], {})
            # A suggestion exists exactly when the distance is below threshold.
            self.assertEqual(
                bool(match), distance < max_distance
            )
class TestPreview(TransactionCase):
    """parse_preview() behavior across file formats (CSV, XLS, XLSX, ODS):
    encoding errors, option errors, and the preview/matches/fields payload.

    Fix vs. original: the XLS/XLSX/ODS tests read fixture files via a bare
    ``open(...).read()``, leaking the file handle (ResourceWarning under
    ``-W error``); they now use ``with`` blocks so the handle is closed
    deterministically.
    """
    def make_import(self):
        """Create an import wizard over an EUC-KR encoded CSV fixture."""
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'res.users',
            'file': u"로그인,언어\nbob,1\n".encode('euc_kr'),
            'file_type': 'text/csv',
            'file_name': 'kr_data.csv',
        })
        return import_wizard
    @mute_logger('odoo.addons.base_import.models.base_import')
    def test_encoding(self):
        # A non-UTF-8 file must still preview without error (encoding detection).
        import_wizard = self.make_import()
        result = import_wizard.parse_preview({
            'quoting': '"',
            'separator': ',',
        })
        self.assertFalse('error' in result)
    @mute_logger('odoo.addons.base_import.models.base_import')
    def test_csv_errors(self):
        # Invalid quoting and invalid separator each yield an 'error' entry.
        import_wizard = self.make_import()
        result = import_wizard.parse_preview({
            'quoting': 'foo',
            'separator': ',',
        })
        self.assertTrue('error' in result)
        result = import_wizard.parse_preview({
            'quoting': '"',
            'separator': 'bob',
        })
        self.assertTrue('error' in result)
    def test_csv_success(self):
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file': b'name,Some Value,Counter\n'
                    b'foo,,\n'
                    b'bar,,4\n'
                    b'qux,5,6\n',
            'file_type': 'text/csv'
        })
        result = import_wizard.parse_preview({
            'quoting': '"',
            'separator': ',',
            'has_headers': True,
        })
        self.assertIsNone(result.get('error'))
        self.assertEqual(result['matches'], {0: ['name'], 1: ['somevalue']})
        self.assertEqual(result['headers'], ['name', 'Some Value', 'Counter'])
        # Order depends on iteration order of fields_get
        self.assertItemsEqual(result['fields'], [
            ID_FIELD,
            {'id': 'name', 'name': 'name', 'string': 'Name', 'required': False, 'fields': [], 'type': 'char', 'model_name': 'base_import.tests.models.preview'},
            {'id': 'somevalue', 'name': 'somevalue', 'string': 'Some Value', 'required': True, 'fields': [], 'type': 'integer', 'model_name': 'base_import.tests.models.preview'},
            {'id': 'othervalue', 'name': 'othervalue', 'string': 'Other Variable', 'required': False, 'fields': [], 'type': 'integer', 'model_name': 'base_import.tests.models.preview'},
        ])
        # Preview is column-major; empty cells are dropped from each column.
        self.assertEqual(result['preview'], [['foo', 'bar', 'qux'], ['5'], ['4', '6']])
    @unittest.skipUnless(can_import('xlrd'), "XLRD module not available")
    def test_xls_success(self):
        xls_file_path = get_module_resource('base_import', 'tests', 'test.xls')
        # 'with' closes the fixture handle (the original leaked it).
        with open(xls_file_path, 'rb') as xls_file:
            file_content = xls_file.read()
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file': file_content,
            'file_type': 'application/vnd.ms-excel'
        })
        result = import_wizard.parse_preview({
            'has_headers': True,
        })
        self.assertIsNone(result.get('error'))
        self.assertEqual(result['matches'], {0: ['name'], 1: ['somevalue']})
        self.assertEqual(result['headers'], ['name', 'Some Value', 'Counter'])
        self.assertItemsEqual(result['fields'], [
            ID_FIELD,
            {'id': 'name', 'name': 'name', 'string': 'Name', 'required': False, 'fields': [], 'type': 'char', 'model_name': 'base_import.tests.models.preview'},
            {'id': 'somevalue', 'name': 'somevalue', 'string': 'Some Value', 'required': True, 'fields': [], 'type': 'integer', 'model_name': 'base_import.tests.models.preview'},
            {'id': 'othervalue', 'name': 'othervalue', 'string': 'Other Variable', 'required': False, 'fields': [], 'type': 'integer', 'model_name': 'base_import.tests.models.preview'},
        ])
        self.assertEqual(result['preview'], [['foo', 'bar', 'qux'], ['1', '3', '5'], ['2', '4', '6']])
    @unittest.skipUnless(can_import('xlrd.xlsx'), "XLRD/XLSX not available")
    def test_xlsx_success(self):
        xlsx_file_path = get_module_resource('base_import', 'tests', 'test.xlsx')
        # 'with' closes the fixture handle (the original leaked it).
        with open(xlsx_file_path, 'rb') as xlsx_file:
            file_content = xlsx_file.read()
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file': file_content,
            'file_type': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        })
        result = import_wizard.parse_preview({
            'has_headers': True,
        })
        self.assertIsNone(result.get('error'))
        self.assertEqual(result['matches'], {0: ['name'], 1: ['somevalue']})
        self.assertEqual(result['headers'], ['name', 'Some Value', 'Counter'])
        self.assertItemsEqual(result['fields'], [
            ID_FIELD,
            {'id': 'name', 'name': 'name', 'string': 'Name', 'required': False, 'fields': [], 'type': 'char', 'model_name': 'base_import.tests.models.preview'},
            {'id': 'somevalue', 'name': 'somevalue', 'string': 'Some Value', 'required': True, 'fields': [], 'type': 'integer', 'model_name': 'base_import.tests.models.preview'},
            {'id': 'othervalue', 'name': 'othervalue', 'string': 'Other Variable', 'required': False, 'fields': [], 'type': 'integer', 'model_name': 'base_import.tests.models.preview'},
        ])
        self.assertEqual(result['preview'], [['foo', 'bar', 'qux'], ['1', '3', '5'], ['2', '4', '6']])
    @unittest.skipUnless(can_import('odf'), "ODFPY not available")
    def test_ods_success(self):
        ods_file_path = get_module_resource('base_import', 'tests', 'test.ods')
        # 'with' closes the fixture handle (the original leaked it).
        with open(ods_file_path, 'rb') as ods_file:
            file_content = ods_file.read()
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file': file_content,
            'file_type': 'application/vnd.oasis.opendocument.spreadsheet'
        })
        result = import_wizard.parse_preview({
            'has_headers': True,
        })
        self.assertIsNone(result.get('error'))
        self.assertEqual(result['matches'], {0: ['name'], 1: ['somevalue']})
        self.assertEqual(result['headers'], ['name', 'Some Value', 'Counter'])
        self.assertItemsEqual(result['fields'], [
            ID_FIELD,
            {'id': 'name', 'name': 'name', 'string': 'Name', 'required': False, 'fields': [], 'type': 'char', 'model_name': 'base_import.tests.models.preview'},
            {'id': 'somevalue', 'name': 'somevalue', 'string': 'Some Value', 'required': True, 'fields': [], 'type': 'integer', 'model_name': 'base_import.tests.models.preview'},
            {'id': 'othervalue', 'name': 'othervalue', 'string': 'Other Variable', 'required': False, 'fields': [], 'type': 'integer', 'model_name': 'base_import.tests.models.preview'},
        ])
        self.assertEqual(result['preview'], [['foo', 'bar', 'aux'], ['1', '3', '5'], ['2', '4', '6']])
class test_convert_import_data(TransactionCase):
    """ Tests conversion of base_import.import input into data which
    can be fed to Model.load
    """
    def test_all(self):
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file': b'name,Some Value,Counter\n'
                    b'foo,1,2\n'
                    b'bar,3,4\n'
                    b'qux,5,6\n',
            'file_type': 'text/csv'
        })
        # _convert_import_data returns (rows, kept_field_names).
        data, fields = import_wizard._convert_import_data(
            ['name', 'somevalue', 'othervalue'],
            {'quoting': '"', 'separator': ',', 'has_headers': True}
        )
        self.assertItemsEqual(fields, ['name', 'somevalue', 'othervalue'])
        self.assertItemsEqual(data, [
            ['foo', '1', '2'],
            ['bar', '3', '4'],
            ['qux', '5', '6'],
        ])
    def test_date_fields(self):
        # Custom date/datetime formats must parse without messages.
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'res.partner',
            'file': u'name,date,create_date\n'
                    u'"foo","2013年07月18日","2016-10-12 06:06"\n'.encode('utf-8'),
            'file_type': 'text/csv'
        })
        results = import_wizard.execute_import(
            ['name', 'date', 'create_date'],
            [],
            {
                'date_format': '%Y年%m月%d日',
                'datetime_format': '%Y-%m-%d %H:%M',
                'quoting': '"',
                'separator': ',',
                'has_headers': True
            }
        )
        # if results empty, no errors
        self.assertItemsEqual(results['messages'], [])
    def test_parse_relational_fields(self):
        """ Ensure that relational fields float and date are correctly
        parsed during the import call.
        """
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'res.partner',
            'file': u'name,parent_id/id,parent_id/date,parent_id/credit_limit\n'
                    u'"foo","__export__.res_partner_1","2017年10月12日","5,69"\n'.encode('utf-8'),
            'file_type': 'text/csv'
        })
        options = {
            'date_format': '%Y年%m月%d日',
            'quoting': '"',
            'separator': ',',
            'float_decimal_separator': ',',
            'float_thousand_separator': '.',
            'has_headers': True
        }
        data, import_fields = import_wizard._convert_import_data(
            ['name', 'parent_id/.id', 'parent_id/date', 'parent_id/credit_limit'],
            options
        )
        result = import_wizard._parse_import_data(data, import_fields, options)
        # Check if the data 5,69 as been correctly parsed.
        self.assertEqual(float(result[0][-1]), 5.69)
        self.assertEqual(str(result[0][-2]), '2017-10-12')
    def test_parse_scientific_notation(self):
        """ Ensure that scientific notation is correctly converted to decimal """
        import_wizard = self.env['base_import.import']
        test_options = {}
        # _parse_float_from_data rewrites column 0 of test_data in place.
        test_data = [
            ["1E+05"],
            ["1.20E-05"],
            ["1,9e5"],
            ["9,5e-5"],
        ]
        expected_result = [
            ["100000.000000"],
            ["0.000012"],
            ["190000.000000"],
            ["0.000095"],
        ]
        import_wizard._parse_float_from_data(test_data, 0, 'test-name', test_options)
        self.assertEqual(test_data, expected_result)
    def test_filtered(self):
        """ If ``False`` is provided as field mapping for a column,
        that column should be removed from importable data
        """
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file': b'name,Some Value,Counter\n'
                    b'foo,1,2\n'
                    b'bar,3,4\n'
                    b'qux,5,6\n',
            'file_type': 'text/csv'
        })
        data, fields = import_wizard._convert_import_data(
            ['name', False, 'othervalue'],
            {'quoting': '"', 'separator': ',', 'has_headers': True}
        )
        self.assertItemsEqual(fields, ['name', 'othervalue'])
        self.assertItemsEqual(data, [
            ['foo', '2'],
            ['bar', '4'],
            ['qux', '6'],
        ])
    def test_norow(self):
        """ If a row is composed only of empty values (due to having
        filtered out non-empty values from it), it should be removed
        """
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file': b'name,Some Value,Counter\n'
                    b'foo,1,2\n'
                    b',3,\n'
                    b',5,6\n',
            'file_type': 'text/csv'
        })
        data, fields = import_wizard._convert_import_data(
            ['name', False, 'othervalue'],
            {'quoting': '"', 'separator': ',', 'has_headers': True}
        )
        self.assertItemsEqual(fields, ['name', 'othervalue'])
        # Row 2 (',3,') becomes all-empty after dropping column 1 -> removed.
        self.assertItemsEqual(data, [
            ['foo', '2'],
            ['', '6'],
        ])
    def test_empty_rows(self):
        # Rows that are blank or whitespace-only are dropped entirely.
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file': b'name,Some Value\n'
                    b'foo,1\n'
                    b'\n'
                    b'bar,2\n'
                    b'     \n'
                    b'\t \n',
            'file_type': 'text/csv'
        })
        data, fields = import_wizard._convert_import_data(
            ['name', 'somevalue'],
            {'quoting': '"', 'separator': ',', 'has_headers': True}
        )
        self.assertItemsEqual(fields, ['name', 'somevalue'])
        self.assertItemsEqual(data, [
            ['foo', '1'],
            ['bar', '2'],
        ])
    def test_nofield(self):
        # Mapping no fields at all must raise, not silently import nothing.
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file': b'name,Some Value,Counter\n'
                    b'foo,1,2\n',
            'file_type': 'text/csv'
        })
        self.assertRaises(ImportValidationError, import_wizard._convert_import_data, [], {'quoting': '"', 'separator': ',', 'has_headers': True})
    def test_falsefields(self):
        # All-False mapping (every column skipped) must raise as well.
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file': b'name,Some Value,Counter\n'
                    b'foo,1,2\n',
            'file_type': 'text/csv'
        })
        self.assertRaises(
            ImportValidationError,
            import_wizard._convert_import_data,
            [False, False, False],
            {'quoting': '"', 'separator': ',', 'has_headers': True})
    def test_newline_import(self):
        """
        Ensure importing keep newlines
        """
        output = io.BytesIO()
        writer = pycompat.csv_writer(output, quoting=1)
        # Cell values containing embedded newlines/tabs must survive round-trip.
        data_row = [u"\tfoo\n\tbar", u" \"hello\" \n\n 'world' "]
        writer.writerow([u"name", u"Some Value"])
        writer.writerow(data_row)
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file': output.getvalue(),
            'file_type': 'text/csv',
        })
        data, _ = import_wizard._convert_import_data(
            ['name', 'somevalue'],
            {'quoting': '"', 'separator': ',', 'has_headers': True}
        )
        self.assertItemsEqual(data, [data_row])
    def test_set_empty_value_import(self):
        # 'import_set_empty_fields': unparseable cells in the listed fields
        # are imported as empty instead of failing the row.
        partners_before = self.env['res.partner'].search([])
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'res.partner',
            'file': """foo,US,person\n
foo1,Invalid Country,person\n
foo2,US,persons\n""",
            'file_type': 'text/csv'
        })
        results = import_wizard.execute_import(
            ['name', 'country_id', 'company_type'],
            [],
            {
                'quoting': '"',
                'separator': ',',
                'import_set_empty_fields': ['country_id', 'company_type'],
            }
        )
        partners_now = self.env['res.partner'].search([]) - partners_before
        self.assertEqual(len(results['ids']), 3, "should have imported the first 3 records in full, got %s" % results['ids'])
        self.assertEqual(partners_now[0].name, 'foo', "New partner's name should be foo")
        self.assertEqual(partners_now[0].country_id.id, self.env.ref('base.us').id, "Foo partner's country should be US")
        self.assertEqual(partners_now[0].company_type, 'person', "Foo partner's country should be person")
        self.assertEqual(partners_now[1].country_id.id, False, "foo1 partner's country should be False")
        self.assertEqual(partners_now[2].company_type, False, "foo2 partner's country should be False")
        # if results empty, no errors
        self.assertItemsEqual(results['messages'], [])
    def test_skip_record_import(self):
        # 'import_skip_records': rows with an unparseable cell in the listed
        # fields are dropped entirely instead of failing the import.
        partners_before = self.env['res.partner'].search([])
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'res.partner',
            'file': """foo,US,0,person\n
foo1,Invalid Country,0,person\n
foo2,US,False Value,person\n
foo3,US,0,persons\n""",
            'file_type': 'text/csv'
        })
        results = import_wizard.execute_import(
            ['name', 'country_id', 'is_company', 'company_type'],
            [],
            {
                'quoting': '"',
                'separator': ',',
                'import_skip_records': ['country_id', 'is_company', 'company_type']
            }
        )
        partners_now = self.env['res.partner'].search([]) - partners_before
        self.assertEqual(len(results['ids']), 1, "should have imported the first record in full, got %s" % results['ids'])
        self.assertEqual(partners_now.name, 'foo', "New partner's name should be foo")
        # if results empty, no errors
        self.assertItemsEqual(results['messages'], [])
    def test_multi_mapping(self):
        """ Test meant specifically for the '_handle_multi_mapping' that allows mapping multiple
        columns to the same field and merging the values together.
        It makes sure that values of type Char and Many2many are correctly merged. """
        tag1, tag2, tag3 = self.env['res.partner.category'].create([{
            'name': 'tag1',
        }, {
            'name': 'tag2',
        }, {
            'name': 'tag3',
        }])
        file_partner_values = [
            ['Mitchel', 'US', 'Admin', 'The Admin User', 'tag1,tag2', 'tag3'],
            ['Marc', 'US', 'Demo', 'The Demo User', '', 'tag3'],
            ['Joel', 'US', 'Portal', '', 'tag1', 'tag3'],
        ]
        existing_partners = self.env['res.partner'].search_read([], ['id'])
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'res.partner',
            'file': '\n'.join([';'.join(partner_values) for partner_values in file_partner_values]),
            'file_type': 'text/csv',
        })
        # Three columns map to 'name' (space-joined) and two to 'category_id'
        # (union of the tag sets).
        results = import_wizard.execute_import(
            ['name', 'country_id', 'name', 'name', 'category_id', 'category_id'],
            [],
            {
                'quoting': '"',
                'separator': ';',
            },
        )
        # if result is empty, no import error
        self.assertItemsEqual(results['messages'], [])
        partners = self.env['res.partner'].search([
            ('id', 'not in', [existing_partner['id'] for existing_partner in existing_partners])
        ], order='id asc')
        self.assertEqual(3, len(partners))
        self.assertEqual('Mitchel Admin The Admin User', partners[0].name)
        self.assertEqual('Marc Demo The Demo User', partners[1].name)
        self.assertEqual('Joel Portal', partners[2].name)
        self.assertEqual(tag1 | tag2 | tag3, partners[0].category_id)
        self.assertEqual(tag3, partners[1].category_id)
        self.assertEqual(tag1 | tag3, partners[2].category_id)
class TestBatching(TransactionCase):
    """Batched imports: batch detection in parse_preview() and the 'limit' /
    'skip' / 'nextrow' contract of execute_import().

    Fix vs. original: corrected the assertion message typo
    ("should have importe" -> "should have imported") and the docstring
    typo ("at at time" -> "at a time"); behavior is unchanged.
    """
    def _makefile(self, rows):
        """Return CSV bytes with a header line and *rows* 'n_<i>,<i>' lines."""
        f = io.BytesIO()
        writer = pycompat.csv_writer(f, quoting=1)
        writer.writerow(['name', 'counter'])
        for i in range(rows):
            writer.writerow(['n_%d' % i, str(i)])
        return f.getvalue()
    def test_recognize_batched(self):
        # 'batch' is False when the file fits within 'limit', True otherwise.
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file_type': 'text/csv',
        })
        import_wizard.file = self._makefile(10)
        result = import_wizard.parse_preview({
            'quoting': '"',
            'separator': ',',
            'has_headers': True,
            'limit': 100,
        })
        self.assertIsNone(result.get('error'))
        self.assertIs(result['batch'], False)
        result = import_wizard.parse_preview({
            'quoting': '"',
            'separator': ',',
            'has_headers': True,
            'limit': 5,
        })
        self.assertIsNone(result.get('error'))
        self.assertIs(result['batch'], True)
    def test_limit_on_lines(self):
        """ The limit option should be a limit on the number of *lines*
        imported at a time, not the number of *records*. This is relevant
        when it comes to embedded o2m.
        A big question is whether we want to round up or down (if the limit
        brings us inside a record). Rounding up (aka finishing up the record
        we're currently parsing) seems like a better idea:
        * if the first record has so many sub-lines it hits the limit we still
          want to import it (it's probably extremely rare but it can happen)
        * if we have one line per record, we probably want to import <limit>
          records not <limit-1>, but if we stop in the middle of the "current
          record" we'd always ignore the last record (I think)
        """
        # 10 records, each spanning 10 lines (1 parent + 9 o2m continuation rows).
        f = io.BytesIO()
        writer = pycompat.csv_writer(f, quoting=1)
        writer.writerow(['name', 'value/value'])
        for record in range(10):
            writer.writerow(['record_%d' % record, '0'])
            for row in range(1, 10):
                writer.writerow(['', str(row)])
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.o2m',
            'file_type': 'text/csv',
            'file_name': 'things.csv',
            'file': f.getvalue(),
        })
        opts = {'quoting': '"', 'separator': ',', 'has_headers': True}
        preview = import_wizard.parse_preview({**opts, 'limit': 15})
        self.assertIs(preview['batch'], True)
        # limit=5 falls inside record 0 -> rounded up to the full record (10 lines).
        results = import_wizard.execute_import(
            ['name', 'value/value'], [],
            {**opts, 'limit': 5}
        )
        self.assertFalse(results['messages'])
        self.assertEqual(len(results['ids']), 1, "should have imported the first record in full, got %s" % results['ids'])
        self.assertEqual(results['nextrow'], 10)
        results = import_wizard.execute_import(
            ['name', 'value/value'], [],
            {**opts, 'limit': 15}
        )
        self.assertFalse(results['messages'])
        self.assertEqual(len(results['ids']), 2, "should have imported the first two records, got %s" % results['ids'])
        self.assertEqual(results['nextrow'], 20)
    def test_batches(self):
        partners_before = self.env['res.partner'].search([])
        opts = {'has_headers': True, 'separator': ',', 'quoting': '"'}
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'res.partner',
            'file_type': 'text/csv',
            'file_name': 'clients.csv',
            'file': b"""name,email
a,[email protected]
b,[email protected]
,
c,[email protected]
d,[email protected]
e,[email protected]
f,[email protected]
g,[email protected]
"""
        })
        results = import_wizard.execute_import(['name', 'email'], [], {**opts, 'limit': 1})
        self.assertFalse(results['messages'])
        self.assertEqual(len(results['ids']), 1)
        # titlerow is ignored by lastrow's counter
        self.assertEqual(results['nextrow'], 1)
        partners_1 = self.env['res.partner'].search([]) - partners_before
        self.assertEqual(partners_1.name, 'a')
        results = import_wizard.execute_import(['name', 'email'], [], {**opts, 'limit': 2, 'skip': 1})
        self.assertFalse(results['messages'])
        self.assertEqual(len(results['ids']), 2)
        # empty row should also be ignored
        self.assertEqual(results['nextrow'], 3)
        partners_2 = self.env['res.partner'].search([]) - (partners_before | partners_1)
        self.assertEqual(partners_2.mapped('name'), ['b', 'c'])
        # nextrow == 0 signals the final batch.
        results = import_wizard.execute_import(['name', 'email'], [], {**opts, 'limit': 10, 'skip': 3})
        self.assertFalse(results['messages'])
        self.assertEqual(len(results['ids']), 4)
        self.assertEqual(results['nextrow'], 0)
        partners_3 = self.env['res.partner'].search([]) - (partners_before | partners_1 | partners_2)
        self.assertEqual(partners_3.mapped('name'), ['d', 'e', 'f', 'g'])
class test_failures(TransactionCase):
    """Stress cases that historically broke the importer."""
    def test_big_attachments(self):
        """
        Ensure big fields (e.g. b64-encoded image data) can be imported and
        we're not hitting limits of the default CSV parser config
        """
        from PIL import Image
        # A 1920x1080 RGB image yields a multi-megabyte base64 cell.
        im = Image.new('RGB', (1920, 1080))
        fout = io.BytesIO()
        writer = pycompat.csv_writer(fout, dialect=None)
        writer.writerows([
            [u'name', u'db_datas'],
            [u'foo', base64.b64encode(im.tobytes()).decode('ascii')]
        ])
        import_wizard = self.env['base_import.import'].create({
            'res_model': 'ir.attachment',
            'file': fout.getvalue(),
            'file_type': 'text/csv'
        })
        results = import_wizard.execute_import(
            ['name', 'db_datas'],
            [],
            {'has_headers': True, 'separator': ',', 'quoting': '"'})
        self.assertFalse(results['messages'], "results should be empty on successful import")
| 41.674672 | 38,174 |
5,199 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
"""
Tests for various autodetection magics for CSV imports
"""
import codecs
from odoo.tests import common
class ImportCase(common.TransactionCase):
    """Shared base for CSV autodetection tests."""

    def _make_import(self, contents):
        """Create an import wizard over *contents*, targeting the 'complex'
        test model with a fixed CSV file name and MIME type."""
        values = {
            'res_model': 'base_import.tests.models.complex',
            'file_name': 'f',
            'file_type': 'text/csv',
            'file': contents,
        }
        return self.env['base_import.import'].create(values)
class TestEncoding(ImportCase):
    """
    create + parse_preview -> check result options
    """
    def _check_text(self, text, encodings, **options):
        # Encode a small CSV containing *text* in each candidate encoding
        # (always including the UTF family) and check the guessed encoding
        # is one of the acceptable names for that candidate.
        options.setdefault('quoting', '"')
        options.setdefault('separator', '\t')
        test_text = "text\tnumber\tdate\tdatetime\n%s\t1.23.45,67\t\t\n" % text
        for encoding in ['utf-8', 'utf-16', 'utf-32', *encodings]:
            # A tuple entry means (encoding to write, acceptable guesses).
            if isinstance(encoding, tuple):
                encoding, es = encoding
            else:
                es = [encoding]
            preview = self._make_import(
                test_text.encode(encoding)).parse_preview(dict(options))
            self.assertIsNone(preview.get('error'))
            guessed = preview['options']['encoding']
            self.assertIsNotNone(guessed)
            # Compare canonical codec names, not raw strings.
            self.assertIn(
                codecs.lookup(guessed).name, [
                    codecs.lookup(e).name
                    for e in es
                ]
            )
    def test_autodetect_encoding(self):
        """ Check that import preview can detect & return encoding
        """
        self._check_text("Iñtërnâtiônàlizætiøn", [('iso-8859-1', ['iso-8859-1', 'iso-8859-2'])])
        self._check_text("やぶら小路の藪柑子。海砂利水魚の、食う寝る処に住む処、パイポパイポ パイポのシューリンガン。", ['eucjp', 'shift_jis', 'iso2022_jp'])
        self._check_text("대통령은 제4항과 제5항의 규정에 의하여 확정된 법률을 지체없이 공포하여야 한다, 탄핵의 결정.", ['euc_kr', 'iso2022_kr'])
        # + control in widget
    def test_override_detection(self):
        """ ensure an explicitly specified encoding is not overridden by the
        auto-detection
        """
        # UTF-8 bytes decoded as the requested iso-8859-1 produce mojibake,
        # proving detection did not take over.
        s = "Iñtërnâtiônàlizætiøn".encode('utf-8')
        r = self._make_import(s + b'\ntext')\
            .parse_preview({
                'quoting': '"',
                'separator': '\t',
                'encoding': 'iso-8859-1',
            })
        self.assertIsNone(r.get('error'))
        self.assertEqual(r['options']['encoding'], 'iso-8859-1')
        self.assertEqual(r['preview'], [[s.decode('iso-8859-1'), 'text']])
class TestFileSeparator(ImportCase):
    """Separator handling in parse_preview(): explicit values are honored
    (right or wrong) and an empty separator triggers guessing."""
    def setUp(self):
        super().setUp()
        # Pipe-separated fixture: header 'c|f' plus four data rows.
        self.imp = self._make_import(
            """c|f
a|1
b|2
c|3
d|4
""")
    def test_explicit_success(self):
        r = self.imp.parse_preview({
            'separator': '|',
            'has_headers': True,
            'quoting': '"',
        })
        self.assertIsNone(r.get('error'))
        self.assertEqual(r['headers'], ['c', 'f'])
        # Preview is column-major.
        self.assertEqual(r['preview'], [['a', 'b', 'c', 'd'], ['1', '2', '3', '4']])
        self.assertEqual(r['options']['separator'], '|')
    def test_explicit_fail(self):
        """ Don't protect user against making mistakes
        """
        # Wrong separator -> everything collapses into one column, no error.
        r = self.imp.parse_preview({
            'separator': ',',
            'has_headers': True,
            'quoting': '"',
        })
        self.assertIsNone(r.get('error'))
        self.assertEqual(r['headers'], ['c|f'])
        self.assertEqual(r['preview'], [['a|1', 'b|2', 'c|3', 'd|4']])
        self.assertEqual(r['options']['separator'], ',')
    def test_guess_ok(self):
        # Empty separator -> autodetection finds '|'.
        r = self.imp.parse_preview({
            'separator': '',
            'has_headers': True,
            'quoting': '"',
        })
        self.assertIsNone(r.get('error'))
        self.assertEqual(r['headers'], ['c', 'f'])
        self.assertEqual(r['preview'], [['a', 'b', 'c', 'd'], ['1', '2', '3', '4']])
        self.assertEqual(r['options']['separator'], '|')
    def test_noguess(self):
        """ If the guesser has no idea what the separator is, it defaults to
        "," but should not set that value
        """
        imp = self._make_import('c\na\nb\nc\nd')
        r = imp.parse_preview({
            'separator': '',
            'has_headers': True,
            'quoting': '"',
        })
        self.assertIsNone(r.get('error'))
        self.assertEqual(r['headers'], ['c'])
        self.assertEqual(r['preview'], [['a', 'b', 'c', 'd']])
        self.assertEqual(r['options']['separator'], '')
class TestNumberSeparators(common.TransactionCase):
    """Check that float parsing normalizes signs, accounting-style
    parentheses and exotic thousand/decimal separators."""

    def test_parse_float(self):
        wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.float',
        })
        raw_values = [
            '1.62', '-1.62', '+1.62', ' +1.62 ', '(1.62)',
            "1'234'567,89", "1.234.567'89",
        ]
        parsed = wizard._parse_import_data(
            [[value] for value in raw_values], ['value'], {})
        self.assertEqual(
            [row[0] for row in parsed],
            ['1.62', '-1.62', '+1.62', '+1.62', '-1.62',
             '1234567.89', '1234567.89']
        )
| 33.019737 | 5,019 |
71,643 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import binascii
import codecs
import collections
import difflib
import unicodedata
import chardet
import datetime
import io
import itertools
import logging
import psycopg2
import operator
import os
import re
import requests
from PIL import Image
from odoo import api, fields, models
from odoo.tools.translate import _
from odoo.tools.mimetypes import guess_mimetype
from odoo.tools import config, DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, pycompat
FIELDS_RECURSION_LIMIT = 3          # max depth when recursing into o2m subfields
ERROR_PREVIEW_BYTES = 200           # bytes of raw input shown back on parse errors
DEFAULT_IMAGE_TIMEOUT = 3           # seconds allowed per remote image download
DEFAULT_IMAGE_MAXBYTES = 10 * 1024 * 1024   # hard cap on a downloaded image's size
DEFAULT_IMAGE_REGEX = r"^(?:http|https)://"  # only plain http(s) URLs may be fetched
DEFAULT_IMAGE_CHUNK_SIZE = 32768    # streaming chunk size for image downloads
IMAGE_FIELDS = ["icon", "image", "logo", "picture"]  # field names treated as images
_logger = logging.getLogger(__name__)

# BOMs of the encodings chardet may report with an explicit endianness suffix;
# used to rectify the guessed encoding when the data starts with a BOM
# (see Import._read_csv)
BOM_MAP = {
    'utf-16le': codecs.BOM_UTF16_LE,
    'utf-16be': codecs.BOM_UTF16_BE,
    'utf-32le': codecs.BOM_UTF32_LE,
    'utf-32be': codecs.BOM_UTF32_BE,
}

# optional spreadsheet dependencies: xlrd for .xls, its xlsx submodule
# (split out in newer releases) for .xlsx, odfpy for .ods; when missing,
# the matching handler below is None and the format is rejected at read time
try:
    import xlrd
    try:
        from xlrd import xlsx
    except ImportError:
        xlsx = None
except ImportError:
    xlrd = xlsx = None

try:
    from . import odf_ods_reader
except ImportError:
    odf_ods_reader = None

# mimetype -> (file extension, handler module or truthy marker, pip requirement)
FILE_TYPE_DICT = {
    'text/csv': ('csv', True, None),
    'application/vnd.ms-excel': ('xls', xlrd, 'xlrd'),
    'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet': ('xlsx', xlsx, 'xlrd >= 1.0.0'),
    'application/vnd.oasis.opendocument.spreadsheet': ('ods', odf_ods_reader, 'odfpy')
}
# file extension (with leading dot) -> handler module (or None when the
# optional dependency is missing); the mimetype key itself is irrelevant
# here, so iterate over the values only
EXTENSIONS = {
    '.' + ext: handler
    for ext, handler, _req in FILE_TYPE_DICT.values()
}
class ImportValidationError(Exception):
    """
    Error raised during the pre-validation pass that runs before the actual
    data loading (e.g. while parsing images, ints or dates, or when the user
    mapped no column at all).

    Its attributes mirror the structure of the errors produced by the data
    loading itself, so the client can render every error the same way
    regardless of which phase raised it.
    """
    def __init__(self, message, **kwargs):
        super().__init__(message)
        self.message = message
        # severity, defaults to a blocking 'error'
        self.type = kwargs.get('error_type', 'error')
        self.record = False
        self.not_matching_error = True
        # single-element path to the offending field, or False when unknown
        field = kwargs.get('field')
        self.field_path = [field] if field else False
        self.field_type = kwargs.get('field_type')
class Base(models.AbstractModel):
    _inherit = 'base'

    @api.model
    def get_import_templates(self):
        """
        Get the import templates label and path.

        Returns an empty list by default; concrete models override this to
        advertise downloadable sample files on the import screen.

        :return: a list(dict) containing label and template path
                 like ``[{'label': 'foo', 'template': 'path'}]``
        """
        return []
class ImportMapping(models.Model):
    """ mapping of previous column:field selections

    This is useful when repeatedly importing from a third-party
    system: column names generated by the external system may
    not match Odoo's field names or labels. This model is used
    to save the mapping between column names and fields so that
    next time a user imports from the same third-party systems
    we can automatically match the columns to the correct field
    without them having to re-enter the mapping every single
    time.
    """
    _name = 'base_import.mapping'
    _description = 'Base Import Mapping'

    # model the saved mapping applies to
    res_model = fields.Char(index=True)
    # column header exactly as found in the user's file
    column_name = fields.Char()
    # field (possibly a '/'-joined path) the column was mapped to
    field_name = fields.Char()
class ResUsers(models.Model):
    _inherit = 'res.users'

    def _can_import_remote_urls(self):
        """ Hook to decide whether the current user is allowed to import
        images via URL (as such an import can DOS a worker). By default,
        allows the administrator group.

        Override to relax or tighten the policy for other groups.

        :rtype: bool
        """
        self.ensure_one()
        return self._is_admin()
class Import(models.TransientModel):
"""
This model is used to prepare the loading of data coming from a user file.
Here is the process that is followed:
#. The user selects a file to import.
#. File parsing and mapping suggestion (see "parse_preview" method)
#. Extract the current model's importable fields tree (see :meth:`get_fields_tree`).
#. Read the file (see :meth:`_read_file`) and extract header names and file
length (used for batch import).
#. Extract headers types from the data preview (10 first line of the file)
(see :meth:`_extract_headers_types`).
#. Try to find for each header a field to map with (see :meth:`_get_mapping_suggestions`)
- First check the previously saved mappings between the header name
and one of the model's fields.
- If no mapping found, try an exact match comparison using fields
technical names, labels and user language translated labels.
- If nothing found, try a fuzzy match using word distance between
header name and fields tachnical names, labels and user language
translated labels. Keep only the closest match.
#. Prepare examples for each columns using the first non null value from each column.
#. Send the info back to the UI where the user can modify the suggested mapping.
#. Execute the import: There are two import mode with uses the same process. (see :meth:`execute_import`)
#. Test import: Try to import but rollback the transaction. This allows
the check errors during the import process and allow the user to
choose import options for the different encountered errors.
#. Real import: Try to import the file using the configured mapping and
the eventual "error mapping options". If import encounters blocking
errors, the transaction is rollbacked and the user is allowed to
choose import options for the different errors.
- Get file data and fields to import into (see :meth:`_convert_import_data`).
- Parse date, float and binary data (see :meth:`_parse_import_data`).
- Handle multiple mapping -> concatenate char/text/many2many columns
mapped on the same field (see :meth:`_handle_multi_mapping`).
- Handle fallback values for boolean and selection fields, in case
input data does not match any allowed values (see :meth:`_handle_fallback_values`).
- Load data (see ir.model "load" method).
- Rollback transaction if test mode or if encountered error.
- Save mapping if any import is successful to ease later mapping suggestions.
- Return import result to the UI (success or errors if any).
"""
_name = 'base_import.import'
_description = 'Base Import'
# allow imports to survive for 12h in case user is slow
_transient_max_hours = 12.0
# we consider that if the difference is more than 0.2, then the two compared strings are "too different" to propose
# any match between them. (see '_get_mapping_suggestion' for more details)
FUZZY_MATCH_DISTANCE = 0.2
res_model = fields.Char('Model')
file = fields.Binary('File', help="File to check and/or import, raw binary (not base64)", attachment=False)
file_name = fields.Char('File Name')
file_type = fields.Char('File Type')
    @api.model
    def get_fields_tree(self, model, depth=FIELDS_RECURSION_LIMIT):
        """ Recursively get fields for the provided model (through
        fields_get) and filter them according to importability

        The output format is a list of :class:`Field`:

        .. class:: Field

            .. attribute:: id: str

                A non-unique identifier for the field, used to compute
                the span of the ``required`` attribute: if multiple
                ``required`` fields have the same id, only one of them
                is necessary.

            .. attribute:: name: str

                The field's logical (Odoo) name within the scope of
                its parent.

            .. attribute:: string: str

                The field's human-readable name (``@string``)

            .. attribute:: required: bool

                Whether the field is marked as required in the
                model. Clients must provide non-empty import values
                for all required fields or the import will error out.

            .. attribute:: fields: list[Field]

                The current field's subfields. The database and
                external identifiers for m2o and m2m fields; a
                filtered and transformed fields_get for o2m fields (to
                a variable depth defined by ``depth``).

                Fields with no sub-fields will have an empty list of
                sub-fields.

            .. attribute:: model_name: str

                Used in the Odoo Field Tooltip on the import view
                and to get the model of the field of the related field(s).
                Name of the current field's model.

            .. attribute:: comodel_name: str

                Used in the Odoo Field Tooltip on the import view
                and to get the model of the field of the related field(s).
                Name of the current field's comodel, i.e. if the field is a relation field.

        Structure example for 'crm.team' model for returned importable_fields::

            [
                {'name': 'message_ids', 'string': 'Messages', 'model_name': 'crm.team', 'comodel_name': 'mail.message', 'fields': [
                    {'name': 'moderation_status', 'string': 'Moderation Status', 'model_name': 'mail.message', 'fields': []},
                    {'name': 'body', 'string': 'Contents', 'model_name': 'mail.message', 'fields' : []}
                ]},
                {'name': 'name', 'string': 'Sales Team', 'model_name': 'crm.team', 'fields' : []}
            ]

        :param str model: name of the model to get fields from
        :param int depth: depth of recursion into o2m fields
        """
        Model = self.env[model]
        # the External ID pseudo-field is always importable
        importable_fields = [{
            'id': 'id',
            'name': 'id',
            'string': _("External ID"),
            'required': False,
            'fields': [],
            'type': 'id',
        }]
        if not depth:
            return importable_fields

        model_fields = Model.fields_get()
        blacklist = models.MAGIC_COLUMNS + [Model.CONCURRENCY_CHECK_FIELD]
        for name, field in model_fields.items():
            if name in blacklist:
                continue
            # an empty string means the field is deprecated, @deprecated must
            # be absent or False to mean not-deprecated
            if field.get('deprecated', False) is not False:
                continue
            if field.get('readonly'):
                # a readonly field is importable only if at least one state
                # makes it writable
                states = field.get('states')
                if not states:
                    continue
                # states = {state: [(attr, value), (attr2, value2)], state2:...}
                if not any(attr == 'readonly' and value is False
                           for attr, value in itertools.chain.from_iterable(states.values())):
                    continue
            field_value = {
                'id': name,
                'name': name,
                'string': field['string'],
                # Y U NO ALWAYS HAS REQUIRED
                'required': bool(field.get('required')),
                'fields': [],
                'type': field['type'],
                'model_name': model
            }
            if field['type'] in ('many2many', 'many2one'):
                # relations are importable either by external or database id
                field_value['fields'] = [
                    dict(field_value, name='id', string=_("External ID"), type='id'),
                    dict(field_value, name='.id', string=_("Database ID"), type='id'),
                ]
                field_value['comodel_name'] = field['relation']
            elif field['type'] == 'one2many':
                field_value['fields'] = self.get_fields_tree(field['relation'], depth=depth-1)
                # the Database ID subfield is only exposed in developer mode
                if self.user_has_groups('base.group_no_one'):
                    field_value['fields'].append({'id': '.id', 'name': '.id', 'string': _("Database ID"), 'required': False, 'fields': [], 'type': 'id'})
                field_value['comodel_name'] = field['relation']
            importable_fields.append(field_value)

        # TODO: cache on model?
        return importable_fields
def _filter_fields_by_types(self, model_fields_tree, header_types):
""" Remove from model_fields_tree param all the fields and subfields
that do not match the types in header_types
:param: list[dict] model_fields_tree: Contains recursively all the importable fields of the target model.
Generated in "get_fields_tree" method.
:param: list header_types: Contains the extracted fields types of the current header.
Generated in :meth:`_extract_header_types`.
"""
most_likely_fields_tree = []
for field in model_fields_tree:
subfields = field.get('fields')
if subfields:
filtered_field = dict(field) # Avoid modifying fields.
filtered_field['fields'] = self._filter_fields_by_types(subfields, header_types)
most_likely_fields_tree.append(filtered_field)
elif field.get('type') in header_types:
most_likely_fields_tree.append(field)
return most_likely_fields_tree
    def _read_file(self, options):
        """ Dispatch to specific method to read file content, according to its mimetype or file type

        Tries, in order: the mimetype guessed from the file's magic bytes,
        the client-provided mimetype, then the file name's extension. Each
        failed attempt is logged and the next source is tried.

        :param dict options: reading options (quoting, separator, ...)
        :returns: ``(file_length, rows)`` as produced by the format-specific reader
        :raises ImportError: when the format is known but its optional python
            dependency is not installed
        :raises ValueError: when no reader could handle the file
        """
        self.ensure_one()
        # guess mimetype from file content
        mimetype = guess_mimetype(self.file or b'')
        (file_extension, handler, req) = FILE_TYPE_DICT.get(mimetype, (None, None, None))
        if handler:
            try:
                return getattr(self, '_read_' + file_extension)(options)
            except ValueError as e:
                # ValueError carries a user-readable message: surface it as-is
                raise e
            except Exception:
                _logger.warning("Failed to read file '%s' (transient id %d) using guessed mimetype %s", self.file_name or '<unknown>', self.id, mimetype)

        # try reading with user-provided mimetype
        (file_extension, handler, req) = FILE_TYPE_DICT.get(self.file_type, (None, None, None))
        if handler:
            try:
                return getattr(self, '_read_' + file_extension)(options)
            except ValueError as e:
                raise e
            except Exception:
                _logger.warning("Failed to read file '%s' (transient id %d) using user-provided mimetype %s", self.file_name or '<unknown>', self.id, self.file_type)

        # fallback on file extensions as mime types can be unreliable (e.g.
        # software setting incorrect mime types, or non-installed software
        # leading to browser not sending mime types)
        if self.file_name:
            p, ext = os.path.splitext(self.file_name)
            if ext in EXTENSIONS:
                try:
                    return getattr(self, '_read_' + ext[1:])(options)
                except ValueError as e:
                    raise e
                except Exception:
                    _logger.warning("Failed to read file '%s' (transient id %s) using file extension", self.file_name, self.id)

        if req:
            # req is only set when the format was recognized but the optional
            # dependency (e.g. xlrd, odfpy) is missing
            raise ImportError(_("Unable to load \"{extension}\" file: requires Python module \"{modname}\"").format(extension=file_extension, modname=req))
        raise ValueError(_("Unsupported file format \"{}\", import only supports CSV, ODS, XLS and XLSX").format(self.file_type))
def _read_xls(self, options):
book = xlrd.open_workbook(file_contents=self.file or b'')
sheets = options['sheets'] = book.sheet_names()
sheet = options['sheet'] = options.get('sheet') or sheets[0]
return self._read_xls_book(book, sheet)
    def _read_xls_book(self, book, sheet_name):
        """Convert one worksheet of an xlrd workbook into rows of strings.

        Numbers, dates and booleans are stringified so downstream parsing can
        treat every cell uniformly; fully blank rows are dropped.

        :returns: ``(sheet.nrows, non_blank_rows)``
        :raises ValueError: when the sheet contains an error cell
        """
        sheet = book.sheet_by_name(sheet_name)
        rows = []
        # emulate Sheet.get_rows for pre-0.9.4
        for rowx, row in enumerate(map(sheet.row, range(sheet.nrows)), 1):
            values = []
            for colx, cell in enumerate(row, 1):
                if cell.ctype is xlrd.XL_CELL_NUMBER:
                    # xlrd exposes every number as a float; render exact
                    # integers without the trailing ".0"
                    is_float = cell.value % 1 != 0.0
                    values.append(
                        str(cell.value)
                        if is_float
                        else str(int(cell.value))
                    )
                elif cell.ctype is xlrd.XL_CELL_DATE:
                    # a pure date has no fractional (time-of-day) part
                    is_datetime = cell.value % 1 != 0.0
                    # emulate xldate_as_datetime for pre-0.9.3
                    dt = datetime.datetime(*xlrd.xldate.xldate_as_tuple(cell.value, book.datemode))
                    values.append(
                        dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
                        if is_datetime
                        else dt.strftime(DEFAULT_SERVER_DATE_FORMAT)
                    )
                elif cell.ctype is xlrd.XL_CELL_BOOLEAN:
                    values.append(u'True' if cell.value else u'False')
                elif cell.ctype is xlrd.XL_CELL_ERROR:
                    raise ValueError(
                        _("Invalid cell value at row %(row)s, column %(col)s: %(cell_value)s") % {
                            'row': rowx,
                            'col': colx,
                            'cell_value': xlrd.error_text_from_code.get(cell.value, _("unknown error code %s", cell.value))
                        }
                    )
                else:
                    values.append(cell.value)
            if any(x for x in values if x.strip()):
                rows.append(values)

        # return the file length as first value
        return sheet.nrows, rows

    # use the same method for xlsx and xls files (xlrd handles both)
    _read_xlsx = _read_xls
def _read_ods(self, options):
doc = odf_ods_reader.ODSReader(file=io.BytesIO(self.file or b''))
sheets = options['sheets'] = list(doc.SHEETS.keys())
sheet = options['sheet'] = options.get('sheet') or sheets[0]
content = [
row
for row in doc.getSheet(sheet)
if any(x for x in row if x.strip())
]
# return the file length as first value
return len(content), content
def _read_csv(self, options):
""" Returns file length and a CSV-parsed list of all non-empty lines in the file.
:raises csv.Error: if an error is detected during CSV parsing
"""
csv_data = self.file or b''
if not csv_data:
return ()
encoding = options.get('encoding')
if not encoding:
encoding = options['encoding'] = chardet.detect(csv_data)['encoding'].lower()
# some versions of chardet (e.g. 2.3.0 but not 3.x) will return
# utf-(16|32)(le|be), which for python means "ignore / don't strip
# BOM". We don't want that, so rectify the encoding to non-marked
# IFF the guessed encoding is LE/BE and csv_data starts with a BOM
bom = BOM_MAP.get(encoding)
if bom and csv_data.startswith(bom):
encoding = options['encoding'] = encoding[:-2]
if encoding != 'utf-8':
csv_data = csv_data.decode(encoding).encode('utf-8')
separator = options.get('separator')
if not separator:
# default for unspecified separator so user gets a message about
# having to specify it
separator = ','
for candidate in (',', ';', '\t', ' ', '|', unicodedata.lookup('unit separator')):
# pass through the CSV and check if all rows are the same
# length & at least 2-wide assume it's the correct one
it = pycompat.csv_reader(io.BytesIO(csv_data), quotechar=options['quoting'], delimiter=candidate)
w = None
for row in it:
width = len(row)
if w is None:
w = width
if width == 1 or width != w:
break # next candidate
else: # nobreak
separator = options['separator'] = candidate
break
csv_iterator = pycompat.csv_reader(
io.BytesIO(csv_data),
quotechar=options['quoting'],
delimiter=separator)
content = [
row for row in csv_iterator
if any(x for x in row if x.strip())
]
# return the file length as first value
return len(content), content
    @api.model
    def _extract_header_types(self, preview_values, options):
        """ Returns the potential field types, based on the preview values, using heuristics.

        This methods is only used for suggested mapping at 2 levels:

        1. for fuzzy mapping at file load -> Execute the fuzzy mapping only
           on "most likely field types"
        2. For "Suggested fields" section in the fields mapping dropdown list at UI side.

        The following heuristic is used: If all preview values

        - Start with ``__export__``: return id + relational field types
        - Can be cast into integer: return id + relational field types, integer, float and monetary
        - Can be cast into Boolean: return boolean
        - Can be cast into float: return float, monetary
        - Can be cast into date/datetime: return date / datetime
        - Cannot be cast into any of the previous types: return only text based fields

        May also record the detected ``float_thousand_separator`` /
        ``float_decimal_separator`` (and, via :meth:`_try_match_date_time`,
        the date/datetime formats) into ``options`` as a side effect.

        :param preview_values: list of value for the column to determine
            see :meth:`parse_preview` for more details.
        :param options: parsing options
        """
        values = set(preview_values)
        # If all values are empty in preview than can be any field
        if values == {''}:
            return ['all']

        # If all values starts with __export__ this is probably an id
        if all(v.startswith('__export__') for v in values):
            return ['id', 'many2many', 'many2one', 'one2many']

        # If all values can be cast to int type is either id, float or monetary
        # Exception: if we only have 1 and 0, it can also be a boolean
        if all(v.isdigit() for v in values if v):
            field_type = ['integer', 'float', 'monetary']
            if {'0', '1', ''}.issuperset(values):
                field_type.append('boolean')
            return field_type

        # If all values are either True or False, type is boolean
        if all(val.lower() in ('true', 'false', 't', 'f', '') for val in preview_values):
            return ['boolean']

        # If all values can be cast to float, type is either float or monetary
        try:
            thousand_separator = decimal_separator = False
            for val in preview_values:
                val = val.strip()
                if not val:
                    continue
                # value might have the currency symbol left or right from the value
                val = self._remove_currency_symbol(val)
                if val:
                    if options.get('float_thousand_separator') and options.get('float_decimal_separator'):
                        val = val.replace(options['float_thousand_separator'], '').replace(options['float_decimal_separator'], '.')
                    # We are now sure that this is a float, but we still need to find the
                    # thousand and decimal separator
                    else:
                        if val.count('.') > 1:
                            options['float_thousand_separator'] = '.'
                            options['float_decimal_separator'] = ','
                        elif val.count(',') > 1:
                            options['float_thousand_separator'] = ','
                            options['float_decimal_separator'] = '.'
                        elif val.find('.') > val.find(','):
                            thousand_separator = ','
                            decimal_separator = '.'
                        elif val.find(',') > val.find('.'):
                            thousand_separator = '.'
                            decimal_separator = ','
                else:
                    # This is not a float so exit this try
                    # (deliberate ValueError to jump to the except clause)
                    float('a')
            if thousand_separator and not options.get('float_decimal_separator'):
                options['float_thousand_separator'] = thousand_separator
                options['float_decimal_separator'] = decimal_separator
            return ['float', 'monetary']  # Allow float to be mapped on a text field.
        except ValueError:
            pass

        results = self._try_match_date_time(preview_values, options)
        if results:
            return results

        # If not boolean, date/datetime, float or integer, only suggest text based fields.
        return ['text', 'char', 'binary', 'selection', 'html']
def _try_match_date_time(self, preview_values, options):
# Or a date/datetime if it matches the pattern
date_patterns = [options['date_format']] if options.get(
'date_format') else []
user_date_format = self.env['res.lang']._lang_get(self.env.user.lang).date_format
if user_date_format:
try:
to_re(user_date_format)
date_patterns.append(user_date_format)
except KeyError:
pass
date_patterns.extend(DATE_PATTERNS)
match = check_patterns(date_patterns, preview_values)
if match:
options['date_format'] = match
return ['date', 'datetime']
datetime_patterns = [options['datetime_format']] if options.get(
'datetime_format') else []
datetime_patterns.extend(
"%s %s" % (d, t)
for d in date_patterns
for t in TIME_PATTERNS
)
match = check_patterns(datetime_patterns, preview_values)
if match:
options['datetime_format'] = match
return ['datetime']
return []
@api.model
def _extract_headers_types(self, headers, preview, options):
"""
For each column, this method will extract the potential data types based on the preview values
:param list headers: list of headers names. Used as part of key for
returned headers_types to ease understanding of its usage
:param list preview: list of the first file records (see "parse_preview" for more detail) e.g.::
[ ["lead_name1", "1", "partner_id1"], ["lead_name2", "2", "partner_id2"], ... ]
:param options: parsing options
:returns: dict headers_types:
contains all the extracted header types for each header e.g.::
{
(header_index, header_name): ["char", "text", ...],
...
}
"""
headers_types = {}
for column_index, header_name in enumerate(headers):
preview_values = [record[column_index].strip() for record in preview]
type_field = self._extract_header_types(preview_values, options)
headers_types[(column_index, header_name)] = type_field
return headers_types
def _get_mapping_suggestion(self, header, fields_tree, header_types, mapping_fields):
""" Attempts to match a given header to a field of the imported model.
We can distinguish 2 types of header format:
- simple header string that aim to directly match a field of the target model
e.g.: "lead_id" or "Opportunities" or "description".
- composed '/' joined header string that aim to match a field of a
relation field of the target model (= subfield) e.g.:
'lead_id/description' aim to match the field ``description`` of the field lead_id.
When returning result, to ease further treatments, the result is
returned as a list, where each element of the list is a field or
a sub-field of the preceding field.
- ``["lead_id"]`` for simple case = simple matching
- ``["lead_id", "description"]`` for composed case = hierarchy matching
Mapping suggestion is found using the following heuristic:
- first we check if there was a saved mapping by the user
- then try to make an exact match on the field technical name /
english label / translated label
- finally, try the "fuzzy match": word distance between the header
title and the field technical name / english label / translated
label, using the lowest result. The field used for the fuzzy match
are based on the field types we extracted from the header data
(see :meth:`_extract_header_types`).
For subfields, use the same logic.
Word distance is a score between 0 and 1 to express the distance
between two char strings where ``0`` denotes an exact match and
``1`` indicates completely different strings
In order to keep only one column matched per field, we return the
distance. That distance will be used during the deduplicate process
(see :meth:`_deduplicate_mapping_suggestions`) and only the
mapping with the smallest distance will be kept in case of multiple
mapping on the same field. Note that we don't need to return the
distance in case of hierachy mapping as we consider that as an
advanced behaviour. The deduplicate process will ignore hierarchy
mapping. The user will have to manually select on which field he
wants to map what in case of mapping duplicates for sub-fields.
:param str header: header name from the file
:param list fields_tree: list of all the field of the target model
Coming from :meth:`get_fields_tree`
e.g: ``[ { 'name': 'fieldName', 'string': 'fieldLabel', fields: [ { 'name': 'subfieldName', ...} ]} , ... ]``
:param list header_types: Extracted field types for each column in the parsed file, based on its data content.
Coming from :meth:`_extract_header_types`
e.g.: ``['int', 'float', 'char', 'many2one', ...]``
:param dict mapping_fields: contains the previously saved mapping between header and field for the current model.
E.g.: ``{ header_name: field_name }``
:returns: if the header couldn't be matched: an empty dict
else: a dict with the field path and the distance between header and the matched field.
:rtype: ``dict(field_path + Word distance)``
In case of simple matching: ``{'field_path': [field_name], distance: word_distance}``
e.g.: ``{'field_path': ['lead_id'], distance: 0.23254}``
In case of hierarchy matching: ``{'field_path': [parent_field_name, child_field_name, subchild_field_name]}``
e.g.: ``{'field_path': ['lead_id', 'description']}``
"""
if not fields_tree:
return {}
# First, check in saved mapped fields
mapping_field_name = mapping_fields.get(header.lower())
if mapping_field_name and mapping_field_name:
return {
'field_path': [name for name in mapping_field_name.split('/')],
'distance': -1 # Trick to force to keep that match during mapping deduplication.
}
if '/' not in header:
# Then, try exact match
IrTranslation = self.env['ir.translation']
translated_header = IrTranslation._get_source('ir.model.fields,field_description', 'model', self.env.lang, header).lower()
for field in fields_tree:
# exact match found based on the field technical name
if header.casefold() == field['name'].casefold():
break
field_string = field.get('string', '').casefold()
# match found using either user translation, either model defined field label
if translated_header == field_string or header.casefold() == field_string:
break
else:
field = None
if field: # found an exact match, no need to go further
return {
'field_path': [field['name']],
'distance': 0
}
# If no match found, try fuzzy match on fields filtered based on extracted header types
# Filter out fields with types that does not match corresponding header types.
filtered_fields = self._filter_fields_by_types(fields_tree, header_types)
if not filtered_fields:
return {}
min_dist = 1
min_dist_field = False
for field in filtered_fields:
field_string = field.get('string', '').casefold()
# use string distance for fuzzy match only on most likely field types
name_field_dist = self._get_distance(header.casefold(), field['name'].casefold())
string_field_dist = self._get_distance(header.casefold(), field_string)
translated_string_field_dist = self._get_distance(translated_header.casefold(), field_string)
# Keep only the closest mapping suggestion. Note that in case of multiple mapping on the same field,
# a mapping suggestion could be canceled by another one that has a smaller distance on the same field.
# See 'deduplicate_mapping_suggestions' method for more info.
current_field_dist = min([name_field_dist, string_field_dist, translated_string_field_dist])
if current_field_dist < min_dist:
min_dist_field = field['name']
min_dist = current_field_dist
if min_dist < self.FUZZY_MATCH_DISTANCE:
return {
'field_path': [min_dist_field],
'distance': min_dist
}
return {}
# relational field path
field_path = []
subfields_tree = fields_tree
# Iteratively dive into fields tree
for sub_header in header.split('/'):
# Strip sub_header in case spaces are added around '/' for
# readability of paths
# Skip Saved mapping (mapping_field = {})
match = self._get_mapping_suggestion(sub_header.strip(), subfields_tree, header_types, {})
# Any match failure, exit
if not match:
return {}
# prep subfields for next iteration within match['name'][0]
field_name = match['field_path'][0]
subfields_tree = next(item['fields'] for item in subfields_tree if item['name'] == field_name)
field_path.append(field_name)
# No need to return distance for hierarchy mapping
return {'field_path': field_path}
def _get_distance(self, a, b):
""" This method return an index that reflects the distance between the
two given string a and b.
This index is a score between 0 and 1 where ``0`` indicates an exact
match and ``1`` indicates completely different strings.
"""
return 1 - difflib.SequenceMatcher(None, a, b).ratio()
def _get_mapping_suggestions(self, headers, header_types, fields_tree):
""" Attempts to match the imported model's fields to the
titles of the parsed CSV file, if the file is supposed to have
headers.
Returns a dict mapping cell indices to key paths in the ``fields`` tree.
:param list headers: titles of the parsed file
:param dict header_types:
extracted types for each column in the parsed file e.g.::
{
(header_index, header_name): ['int', 'float', 'char', 'many2one',...],
...
}
:param list fields_tree:
list of the target model's fields e.g.::
[
{
'name': 'fieldName',
'string': 'fieldLabel',
'fields': [{ 'name': 'subfieldName', ...}]
},
...
]
:rtype: dict[(int, str), {'field_path': list[str], 'distance': int}]
:returns: mapping_suggestions e.g.:
.. code-block:: python
{
(header_index, header_name): {
'field_path': ['child_id','name'],
'distance': 0
},
...
}
"""
mapping_suggestions = {}
mapping_records = self.env['base_import.mapping'].search_read([('res_model', '=', self.res_model)], ['column_name', 'field_name'])
mapping_fields = {rec['column_name']: rec['field_name'] for rec in mapping_records}
for index, header in enumerate(headers):
match_field = self._get_mapping_suggestion(header, fields_tree, header_types[(index, header)], mapping_fields)
mapping_suggestions[(index, header)] = match_field or None
self._deduplicate_mapping_suggestions(mapping_suggestions)
return mapping_suggestions
def _deduplicate_mapping_suggestions(self, mapping_suggestions):
""" This method is meant to avoid multiple columns to be matched on the same field.
Taking ``mapping_suggestions`` as input, it will check if multiple
columns are mapped to the same field and will only keep the mapping
that has the smallest distance. The other columns that were matched
to the same field are removed from the mapping suggestions.
Hierarchy mapping is considered as advanced and is skipped during this
deduplication process. We consider that multiple mapping on hierarchy
mapping will not occur often and due to the fact that this won't lead
to any particular issues when a non 'char/text' field is selected more
than once in the UI, we keep only the last selected mapping. The
objective is to lighten the mapping suggestion process as much as we can.
:param dict mapping_suggestions: ``{ (column_index, header_name) : { 'field_path': [header_name], 'distance': word_distance }}``
"""
min_dist_per_field = {}
headers_to_keep = []
for header, suggestion in mapping_suggestions.items():
if suggestion is None or len(suggestion['field_path']) > 1:
headers_to_keep.append(header)
continue
field_name = suggestion['field_path'][0]
field_distance = suggestion['distance']
best_distance, _best_header = min_dist_per_field.get(field_name, (1, None))
if field_distance < best_distance:
min_dist_per_field[field_name] = (field_distance, header)
headers_to_keep = headers_to_keep + [value[1] for value in min_dist_per_field.values()]
for header in mapping_suggestions.keys() - headers_to_keep:
del mapping_suggestions[header]
def parse_preview(self, options, count=10):
    """ Generates a preview of the uploaded files, and performs
        fields-matching between the import's file data and the model's
        columns.

        If the headers are not requested (not options.has_headers),
        returned ``matches`` and ``headers`` are both ``False``.

        :param int count: number of preview lines to generate
        :param options: format-specific options.
                        CSV: {quoting, separator, headers}
        :type options: {str, str, str, bool}
        :returns: ``{fields, matches, headers, preview} | {error, preview}``
        :rtype: {dict(str: dict(...)), dict(int, list(str)), list(str), list(list(str))} | {str, str}
    """
    self.ensure_one()
    fields_tree = self.get_fields_tree(self.res_model)
    try:
        file_length, rows = self._read_file(options)
        if file_length <= 0:
            raise ImportValidationError(_("Import file has no content or is corrupt"))
        preview = rows[:count]

        # Get file headers
        if options.get('has_headers') and preview:
            # We need the header types before matching columns to fields
            headers = preview.pop(0)
            header_types = self._extract_headers_types(headers, preview, options)
        else:
            header_types, headers = {}, []

        # Get matches: the ones already selected by the user or propose a new matching.
        matches = {}
        # If user checked to the advanced mode, we re-parse the file but we keep the mapping "as is".
        # No need to make another mapping proposal
        if options.get('keep_matches') and options.get('fields'):
            for index, match in enumerate(options.get('fields', [])):
                if match:
                    matches[index] = match.split('/')
        elif options.get('has_headers'):
            matches = self._get_mapping_suggestions(headers, header_types, fields_tree)
            # remove header_name for matches keys as tuples are no supported in json.
            # and remove distance from suggestion (keep only the field path) as not used at client side.
            matches = {
                header_key[0]: suggestion['field_path']
                for header_key, suggestion in matches.items()
                if suggestion
            }

        # compute if we should activate advanced mode or not:
        # if was already activated of if file contains "relational fields".
        if options.get('keep_matches'):
            advanced_mode = options.get('advanced')
        else:
            # Check is label contain relational field
            has_relational_header = any(len(models.fix_import_export_id_paths(col)) > 1 for col in headers)
            # Check is matches fields have relational field
            has_relational_match = any(len(match) > 1 for field, match in matches.items() if match)
            advanced_mode = has_relational_header or has_relational_match

        # Take first non null values for each column to show preview to users.
        # Initially first non null value is displayed to the user.
        # On hover preview consists in 5 values.
        # NOTE(review): if preview is empty here, preview[0] raises and we
        # fall into the generic error handler below — presumably acceptable
        # since an empty file was already rejected above; confirm.
        column_example = []
        for column_index, _unused in enumerate(preview[0]):
            vals = []
            for record in preview:
                if record[column_index]:
                    # values are truncated to 50 chars with an ellipsis suffix
                    vals.append("%s%s" % (record[column_index][:50], "..." if len(record[column_index]) > 50 else ""))
                if len(vals) == 5:
                    break
            column_example.append(
                vals or
                [""]  # blank value if no example have been found at all for the current column
            )

        # Batch management
        batch = False
        batch_cutoff = options.get('limit')
        if batch_cutoff:
            if count > batch_cutoff:
                batch = len(preview) > batch_cutoff
            else:
                # peek past the cutoff without materializing all rows
                batch = bool(next(
                    itertools.islice(rows, batch_cutoff - count, None),
                    None
                ))

        return {
            'fields': fields_tree,
            'matches': matches or False,
            'headers': headers or False,
            'header_types': list(header_types.values()) or False,
            'preview': column_example,
            'options': options,
            'advanced_mode': advanced_mode,
            'debug': self.user_has_groups('base.group_no_one'),
            'batch': batch,
            'file_length': file_length
        }
    except Exception as error:
        # Due to lazy generators, UnicodeDecodeError (for
        # instance) may only be raised when serializing the
        # preview to a list in the return.
        _logger.debug("Error during parsing preview", exc_info=True)
        preview = None
        if self.file_type == 'text/csv' and self.file:
            preview = self.file[:ERROR_PREVIEW_BYTES].decode('iso-8859-1')
        return {
            'error': str(error),
            # iso-8859-1 ensures decoding will always succeed,
            # even if it yields non-printable characters. This is
            # in case of UnicodeDecodeError (or csv.Error
            # compounded with UnicodeDecodeError)
            'preview': preview,
        }
@api.model
def _convert_import_data(self, fields, options):
    """Extract the columns selected for import into a dense matrix.

    *fields* is the per-column mapping, with ``False``-y entries for
    the columns that must be ignored.  Returns ``(data, import_fields)``
    where *import_fields* is the mapping without holes and *data* the
    matching row matrix, with fully-empty rows filtered out and the
    first ``options['skip']`` rows dropped.

    :param fields: list(str|bool) per-column field mapping
    :param dict options: import options (``has_headers``, ``skip``, ...)
    :returns: (data, fields)
    :rtype: (list(list(str)), list(str))
    :raises ImportValidationError: when no column is mapped at all
    """
    kept_indices = [position for position, field in enumerate(fields) if field]
    if not kept_indices:
        raise ImportValidationError(_("You must configure at least one field to import"))

    if len(kept_indices) == 1:
        # itemgetter with a single index would yield atoms rather than rows
        extract = lambda row: [row[kept_indices[0]]]
    else:
        extract = operator.itemgetter(*kept_indices)

    # the mapping without its holes
    import_fields = [field for field in fields if field]

    _file_length, file_rows = self._read_file(options)
    if options.get('has_headers'):
        file_rows = file_rows[1:]

    # drop rows that are entirely empty (e.g. leftovers of filtered-out
    # o2m columns)
    data = [list(row) for row in map(extract, file_rows) if any(row)]

    # slicing must happen after the empty-row filtering: the offsets
    # reported by load() are post-filtering
    return data[options.get('skip'):], import_fields
@api.model
def _remove_currency_symbol(self, value):
    """Strip a known currency symbol from *value* and normalize its sign.

    Returns the numeric part as a string, prefixed with ``-`` when the
    amount was negative (including the accountants' ``(...)`` notation),
    or ``False`` when *value* does not look like a single float with at
    most one currency symbol known to ``res.currency``.
    """
    value = value.strip()
    negative = False
    # some countries write negative amounts between parentheses
    if value.startswith('(') and value.endswith(')'):
        value = value[1:-1]
        negative = True

    float_regex = re.compile(r'([+-]?[0-9.,]+)')
    parts = [chunk for chunk in float_regex.split(value) if chunk]
    if len(parts) > 2:
        # more than a number and one symbol: probably not a float
        return False

    if len(parts) == 1:
        if float_regex.search(parts[0]) is not None:
            return '-' + parts[0] if negative else parts[0]
        return False

    # split in two chunks: locate which one is the number
    currency_index = 1 if float_regex.search(parts[0]) is not None else 0
    # only accept symbols of currencies known to the database
    if len(self.env['res.currency'].search([('symbol', '=', parts[currency_index].strip())])):
        amount = parts[(currency_index + 1) % 2]
        return '-' + amount if negative else amount

    # otherwise it is not a float with a currency symbol
    return False
@api.model
def _parse_float_from_data(self, data, index, name, options):
    """Normalize the float column *index* of *data*, in place.

    Handles thousand/decimal separators (inferred per value), scientific
    notation and currency symbols.

    :param list data: rows to fix up (mutated in place)
    :param int index: column position of the float field
    :param str name: field name, used in the error message
    :param dict options: import options (may carry separator overrides)
    :raises ImportValidationError: when a value cannot be read as a float
    """
    for line in data:
        line[index] = line[index].strip()
        if not line[index]:
            continue
        thousand_separator, decimal_separator = self._infer_separators(line[index], options)

        if 'E' in line[index] or 'e' in line[index]:
            # scientific notation: expand to a plain decimal form first
            tmp_value = line[index].replace(thousand_separator, '.')
            try:
                tmp_value = '{:f}'.format(float(tmp_value))
                line[index] = tmp_value
                # the expanded form has no grouping left; neutralize the separator
                thousand_separator = ' '
            except Exception:
                # not actually a float in scientific form: fall through
                pass

        line[index] = line[index].replace(thousand_separator, '').replace(decimal_separator, '.')
        old_value = line[index]
        # returns False when the remainder is not a float with a known symbol
        line[index] = self._remove_currency_symbol(line[index])
        if line[index] is False:
            raise ImportValidationError(_("Column %s contains incorrect values (value: %s)", name, old_value), field=name)
def _infer_separators(self, value, options):
""" Try to infer the shape of the separators: if there are two
different "non-numberic" characters in the number, the
former/duplicated one would be grouping ("thousands" separator) and
the latter would be the decimal separator. The decimal separator
should furthermore be unique.
"""
# can't use \p{Sc} using re so handroll it
non_number = [
# any character
c for c in value
# which is not a numeric decoration (() is used for negative
# by accountants)
if c not in '()-+'
# which is not a digit or a currency symbol
if unicodedata.category(c) not in ('Nd', 'Sc')
]
counts = collections.Counter(non_number)
# if we have two non-numbers *and* the last one has a count of 1,
# we probably have grouping & decimal separators
if len(counts) == 2 and counts[non_number[-1]] == 1:
return [character for character, _count in counts.most_common()]
# otherwise get whatever's in the options, or fallback to a default
thousand_separator = options.get('float_thousand_separator', ' ')
decimal_separator = options.get('float_decimal_separator', '.')
return thousand_separator, decimal_separator
def _parse_import_data(self, data, import_fields, options):
    """Normalize *data* before loading.

    Entry point of :meth:`_parse_import_data_recursive`, started on the
    import's own model with an empty prefix; the recursion then walks
    down every relational field referenced in *import_fields*.
    """
    return self._parse_import_data_recursive(
        self.res_model, '', data, import_fields, options)
def _parse_import_data_recursive(self, model, prefix, data, import_fields, options):
    """Recursively normalize date, float and image columns of *data*, in place.

    Walks the fields of *model*; relational fields present in
    *import_fields* (``parent/child`` slash notation) trigger a
    recursive call on their comodel with an extended *prefix*.

    :param str model: technical name of the model being inspected
    :param str prefix: slash-terminated path of the parent fields, '' at top level
    :param list data: row matrix, mutated in place
    :param list import_fields: full slash-paths of the mapped fields
    :param dict options: import options (formats, separators, ...)
    :returns: the (mutated) *data*
    """
    # Get fields of type date/datetime
    all_fields = self.env[model].fields_get()
    for name, field in all_fields.items():
        name = prefix + name
        if field['type'] in ('date', 'datetime') and name in import_fields:
            index = import_fields.index(name)
            self._parse_date_from_data(data, index, name, field['type'], options)
        # Check if the field is in import_field and is a relational (followed by /)
        # Also verify that the field name exactly match the import_field at the correct level.
        elif any(name + '/' in import_field and name == import_field.split('/')[prefix.count('/')] for import_field in import_fields):
            # Recursive call with the relational as new model and add the field name to the prefix
            self._parse_import_data_recursive(field['relation'], name + '/', data, import_fields, options)
        elif field['type'] in ('float', 'monetary') and name in import_fields:
            # Parse float, sometimes float values from file have currency symbol or () to denote a negative value
            # We should be able to manage both case
            index = import_fields.index(name)
            self._parse_float_from_data(data, index, name, options)
        elif field['type'] == 'binary' and field.get('attachment') and any(f in name for f in IMAGE_FIELDS) and name in import_fields:
            index = import_fields.index(name)

            # a single streamed session is reused for all URL downloads of the column
            with requests.Session() as session:
                session.stream = True

                for num, line in enumerate(data):
                    if re.match(config.get("import_image_regex", DEFAULT_IMAGE_REGEX), line[index]):
                        if not self.env.user._can_import_remote_urls():
                            raise ImportValidationError(
                                _("You can not import images via URL, check with your administrator or support for the reason."),
                                field=name, field_type=field['type']
                            )
                        line[index] = self._import_image_by_url(line[index], session, name, num)
                    else:
                        try:
                            # non-URL values must already be valid base64 payloads
                            base64.b64decode(line[index], validate=True)
                        except ValueError:
                            raise ImportValidationError(
                                _("Found invalid image data, images should be imported as either URLs or base64-encoded data."),
                                field=name, field_type=field['type']
                            )

    return data
def _parse_date_from_data(self, data, index, name, field_type, options):
    """Convert the date/datetime column *index* of *data*, in place, to
    the server string format expected by ``load()``.

    For datetime fields the configured ``datetime_format`` is tried
    first, then the plain ``date_format`` is used as fallback for both
    field types.

    :param list data: row matrix, mutated in place
    :param int index: column position of the date(time) field
    :param str name: field name, used in error messages
    :param str field_type: 'date' or 'datetime'
    :param dict options: carries 'date_format' / 'datetime_format'
    :raises ImportValidationError: when a value matches neither format
    """
    dt = datetime.datetime
    fmt = fields.Date.to_string if field_type == 'date' else fields.Datetime.to_string
    d_fmt = options.get('date_format')
    dt_fmt = options.get('datetime_format')
    for num, line in enumerate(data):
        if not line[index]:
            continue

        v = line[index].strip()
        try:
            # first try parsing as a datetime if it's one
            if dt_fmt and field_type == 'datetime':
                try:
                    line[index] = fmt(dt.strptime(v, dt_fmt))
                    continue
                except ValueError:
                    pass
            # otherwise try parsing as a date whether it's a date
            # or datetime
            line[index] = fmt(dt.strptime(v, d_fmt))
        except ValueError as e:
            raise ImportValidationError(
                _("Column %s contains incorrect values. Error in line %d: %s") % (name, num + 1, e),
                field=name, field_type=field_type
            )
        except Exception as e:
            # e.g. d_fmt is None because no date format was detected/configured
            raise ImportValidationError(
                _("Error Parsing Date [%s:L%d]: %s") % (name, num + 1, e),
                field=name, field_type=field_type
            )
def _import_image_by_url(self, url, session, field, line_number):
    """ Imports an image by URL

    :param str url: the original field value
    :param requests.Session session:
    :param str field: name of the field (for logging/debugging)
    :param int line_number: 0-indexed line number within the imported file (for logging/debugging)
    :return: the replacement value
    :rtype: bytes
    :raises ImportValidationError: when the download fails, the payload
        exceeds the configured byte limit, or the image is denser than
        42 million pixels
    """
    maxsize = int(config.get("import_image_maxbytes", DEFAULT_IMAGE_MAXBYTES))
    _logger.debug("Trying to import image from URL: %s into field %s, at line %s" % (url, field, line_number))
    try:
        response = session.get(url, timeout=int(config.get("import_image_timeout", DEFAULT_IMAGE_TIMEOUT)))
        response.raise_for_status()

        # fail fast on the advertised size when the server provides one...
        if response.headers.get('Content-Length') and int(response.headers['Content-Length']) > maxsize:
            raise ImportValidationError(
                _("File size exceeds configured maximum (%s bytes)", maxsize),
                field=field
            )

        content = bytearray()
        for chunk in response.iter_content(DEFAULT_IMAGE_CHUNK_SIZE):
            content += chunk
            # ...but also enforce the limit while streaming, since
            # Content-Length may be absent or wrong
            if len(content) > maxsize:
                raise ImportValidationError(
                    _("File size exceeds configured maximum (%s bytes)", maxsize),
                    field=field
                )

        image = Image.open(io.BytesIO(content))
        w, h = image.size
        if w * h > 42e6:  # Nokia Lumia 1020 photo resolution
            raise ImportValidationError(
                _("Image size excessive, imported images must be smaller than 42 million pixel"),
                field=field
            )

        return base64.b64encode(content)
    except Exception as e:
        # any failure (network, size, decode) is surfaced as a validation error
        _logger.exception(e)
        raise ImportValidationError(
            _(
                "Could not retrieve URL: %(url)s [%(field_name)s: L%(line_number)d]: %(error)s",
                url=url, field_name=field, line_number=line_number + 1, error=e
            ),
            field=field
        )
def execute_import(self, fields, columns, options, dryrun=False):
    """ Actual execution of the import

    :param fields: import mapping: maps each column to a field,
                   ``False`` for the columns to ignore
    :type fields: list(str|bool)
    :param columns: columns label
    :type columns: list(str|bool)
    :param dict options:
    :param bool dryrun: performs all import operations (and
                        validations) but rollbacks writes, allows
                        getting as much errors as possible without
                        the risk of clobbering the database.
    :returns: A list of errors. If the list is empty the import
              executed fully and correctly. If the list is
              non-empty it contains dicts with 3 keys:

              ``type``
                the type of error (``error|warning``)
              ``message``
                the error message associated with the error (a string)
              ``record``
                the data which failed to import (or ``false`` if that data
                isn't available or provided)
    :rtype: dict(ids: list(int), messages: list({type, message, record}))
    """
    self.ensure_one()
    # the whole import runs inside a savepoint so a dryrun can be undone
    self._cr.execute('SAVEPOINT import')

    try:
        input_file_data, import_fields = self._convert_import_data(fields, options)
        # Parse date and float field
        input_file_data = self._parse_import_data(input_file_data, import_fields, options)
    except ImportValidationError as error:
        return {'messages': [error.__dict__]}

    _logger.info('importing %d rows...', len(input_file_data))

    import_fields, merged_data = self._handle_multi_mapping(import_fields, input_file_data)

    if options.get('fallback_values'):
        merged_data = self._handle_fallback_values(import_fields, merged_data, options['fallback_values'])

    name_create_enabled_fields = options.pop('name_create_enabled_fields', {})
    import_limit = options.pop('limit', None)
    model = self.env[self.res_model].with_context(
        import_file=True,
        name_create_enabled_fields=name_create_enabled_fields,
        import_set_empty_fields=options.get('import_set_empty_fields', []),
        import_skip_records=options.get('import_skip_records', []),
        _import_limit=import_limit)
    import_result = model.load(import_fields, merged_data)
    _logger.info('done')

    # If transaction aborted, RELEASE SAVEPOINT is going to raise
    # an InternalError (ROLLBACK should work, maybe). Ignore that.
    # TODO: to handle multiple errors, create savepoint around
    #       write and release it in case of write error (after
    #       adding error to errors array) => can keep on trying to
    #       import stuff, and rollback at the end if there is any
    #       error in the results.
    try:
        if dryrun:
            self._cr.execute('ROLLBACK TO SAVEPOINT import')
            # cancel all changes done to the registry/ormcache
            self.pool.clear_caches()
            self.pool.reset_changes()
        else:
            self._cr.execute('RELEASE SAVEPOINT import')
    except psycopg2.InternalError:
        pass

    # Insert/Update mapping columns when import complete successfully
    if import_result['ids'] and options.get('has_headers'):
        BaseImportMapping = self.env['base_import.mapping']
        for index, column_name in enumerate(columns):
            if column_name:
                # Update to latest selected field
                mapping_domain = [('res_model', '=', self.res_model), ('column_name', '=', column_name)]
                column_mapping = BaseImportMapping.search(mapping_domain, limit=1)
                if column_mapping:
                    if column_mapping.field_name != fields[index]:
                        column_mapping.field_name = fields[index]
                else:
                    BaseImportMapping.create({
                        'res_model': self.res_model,
                        'column_name': column_name,
                        'field_name': fields[index]
                    })

    if 'name' in import_fields:
        index_of_name = import_fields.index('name')
        skipped = options.get('skip', 0)
        # pad front as data doesn't contain anythig for skipped lines
        r = import_result['name'] = [''] * skipped
        # only add names for the window being imported
        r.extend(x[index_of_name] for x in input_file_data[:import_limit])
        # pad back (though that's probably not useful)
        r.extend([''] * (len(input_file_data) - (import_limit or 0)))
    else:
        import_result['name'] = []

    skip = options.get('skip', 0)
    # convert load's internal nextrow to the imported file's
    if import_result['nextrow']:  # don't update if nextrow = 0 (= no nextrow)
        import_result['nextrow'] += skip

    return import_result
def _handle_multi_mapping(self, import_fields, input_file_data):
    """ This method handles multiple mapping on the same field.

    It will return the list of the mapped fields and the concatenated data for each field:

    - If two column are mapped on the same text or char field, they will end up
      in only one column, concatenated via space (char) or new line (text).
    - The same logic is used for many2many fields. Multiple values can be
      imported if they are separated by ``,``.

    Input/output Example:

        input data
            .. code-block:: python

                [
                    ["Value part 1", "1", "res.partner_id1", "Value part 2"],
                    ["I am", "1", "res.partner_id1", "Batman"],
                ]

        import_fields
            ``[desc, some_number, partner, desc]``

        output merged_data
            .. code-block:: python

                [
                    ["Value part 1 Value part 2", "1", "res.partner_id1"],
                    ["I am Batman", "1", "res.partner_id1"],
                ]
        fields
            ``[desc, some_number, partner]``
    """
    # Get fields and their occurrences indexes
    # Among the fields that have been mapped, we get their corresponding mapped column indexes
    # as multiple fields could have been mapped to multiple columns.
    mapped_field_indexes = {}
    for idx, field in enumerate(field for field in import_fields if field):
        mapped_field_indexes.setdefault(field, list()).append(idx)
    import_fields = list(mapped_field_indexes.keys())

    # recreate data and merge duplicates (applies only on text or char fields)
    # Also handles multi-mapping on "field of relation fields".
    merged_data = []
    for record in input_file_data:
        new_record = []
        # NOTE(review): the loop variable `fields` shadows the usual
        # odoo.fields import for the duration of this loop
        for fields, indexes in mapped_field_indexes.items():
            split_fields = fields.split('/')
            target_field = split_fields[-1]

            # get target_field type (on target model)
            target_model = self.res_model
            for field in split_fields:
                if field != target_field:  # if not on the last hierarchy level, retarget the model
                    # reading a relational field on an empty recordset yields an
                    # empty recordset of the comodel, whose _name is the comodel name
                    target_model = self.env[target_model][field]._name
            field = self.env[target_model]._fields.get(target_field)
            # unknown fields fall back to '' and take the first mapped column as-is
            field_type = field.type if field else ''

            # merge data if necessary
            if field_type == 'char':
                new_record.append(' '.join(record[idx] for idx in indexes if record[idx]))
            elif field_type == 'text':
                new_record.append('\n'.join(record[idx] for idx in indexes if record[idx]))
            elif field_type == 'many2many':
                new_record.append(','.join(record[idx] for idx in indexes if record[idx]))
            else:
                new_record.append(record[indexes[0]])

        merged_data.append(new_record)

    return import_fields, merged_data
def _handle_fallback_values(self, import_field, input_file_data, fallback_values):
    """
    If there are fallback values, this method will replace the input file
    data value if it does not match the possible values for the given field.
    This is only valid for boolean and selection fields.

    .. note::

        We can consider that we need to retrieve the selection values for
        all the fields in fallback_values, as if they are present, it's because
        there was already a conflict during first import run and user had to
        select a fallback value for the field.

    :param: list import_field: ordered list of field that have been matched to import data
    :param: list input_file_data: ordered list of values (list) that need to be imported in the given import_fields
    :param: dict fallback_values:

        contains all the fields that have been tagged by the user to use a
        specific fallback value in case the value to import does not match
        values accepted by the field (selection or boolean) e.g.::

            {
                'fieldName': {
                    'fallback_value': fallback_value,
                    'field_model': field_model,
                    'field_type': field_type
                },
                'state': {
                    'fallback_value': 'draft',
                    'field_model': field_model,
                    'field_type': 'selection'
                },
                'active': {
                    'fallback_value': 'true',
                    'field_model': field_model,
                    'field_type': 'boolean'
                }
            }
    """
    # add possible selection values into our fallback dictionary for fields of type "selection"
    for field_string in fallback_values:
        if fallback_values[field_string]['field_type'] != "selection":
            continue
        field_path = field_string.split('/')
        target_field = field_path[-1]
        target_model = self.env[fallback_values[field_string]['field_model']]

        # NOTE(review): this collects the lowercased selection *labels*
        # (the second tuple element), not the technical keys
        selection_values = [value.lower() for (key, value) in target_model.fields_get([target_field])[target_field]['selection']]
        fallback_values[field_string]['selection_values'] = selection_values

    # check fallback values
    for record_index, records in enumerate(input_file_data):
        for column_index, value in enumerate(records):
            field = import_field[column_index]

            if field in fallback_values:
                fallback_value = fallback_values[field]['fallback_value']
                # Boolean
                if fallback_values[field]['field_type'] == "boolean":
                    value = value if value.lower() in ('0', '1', 'true', 'false') else fallback_value
                # Selection
                elif value.lower() not in fallback_values[field]["selection_values"]:
                    value = fallback_value if fallback_value != 'skip' else None  # don't set any value if we skip

                input_file_data[record_index][column_index] = value

    return input_file_data
# characters accepted between the date components
_SEPARATORS = [' ', '/', '-', '']
# component orders tried when guessing a date format
_PATTERN_BASELINE = [
    ('%m', '%d', '%Y'),
    ('%d', '%m', '%Y'),
    ('%Y', '%m', '%d'),
    ('%Y', '%d', '%m'),
]
DATE_FORMATS = []
# take the baseline format and duplicate performing the following
# substitution: long year -> short year, numerical month -> short
# month, numerical month -> long month. Each substitution builds on
# the previous two
for ps in _PATTERN_BASELINE:
    patterns = {ps}
    for s, t in [('%Y', '%y')]:
        patterns.update([  # need listcomp: with genexpr "set changed size during iteration"
            tuple(t if it == s else it for it in f)
            for f in patterns
        ])
    DATE_FORMATS.extend(patterns)
# every component order combined with every separator
DATE_PATTERNS = [
    sep.join(fmt)
    for sep in _SEPARATORS
    for fmt in DATE_FORMATS
]
TIME_PATTERNS = [
    '%H:%M:%S', '%H:%M', '%H',  # 24h
    '%I:%M:%S %p', '%I:%M %p', '%I %p',  # 12h
]
def check_patterns(patterns, values):
    """Return the first pattern of *patterns* matched by every non-empty
    entry of *values*, or ``None`` when no pattern fits them all."""
    for pattern in patterns:
        regex = to_re(pattern)
        if all(regex.match(value) for value in values if value):
            return pattern
    return None


def to_re(pattern):
    """ cut down version of TimeRE converting strptime patterns to regex
    """
    regex = re.sub(r'\s+', r'\\s+', pattern)
    regex = re.sub('%([a-z])', _replacer, regex, flags=re.IGNORECASE)
    # anchor on both ends: the whole value must match the pattern
    return re.compile('^' + regex + '$', re.IGNORECASE)


def _replacer(m):
    # substitute each %-directive with its regex counterpart
    return _P_TO_RE[m.group(1)]


# regex fragment for each supported strptime directive
_P_TO_RE = {
    'd': r"(3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
    'H': r"(2[0-3]|[0-1]\d|\d)",
    'I': r"(1[0-2]|0[1-9]|[1-9])",
    'm': r"(1[0-2]|0[1-9]|[1-9])",
    'M': r"([0-5]\d|\d)",
    'S': r"(6[0-1]|[0-5]\d|\d)",
    'y': r"(\d\d)",
    'Y': r"(\d\d\d\d)",
    'p': r"(am|pm)",
    '%': '%',
}
| 45.257738 | 71,643 |
3,580 |
py
|
PYTHON
|
15.0
|
# Copyright 2011 Marco Conti
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# sourced from https://github.com/marcoconti83/read-ods-with-odfpy
# further altered locally
from odf import opendocument
from odf.table import Table, TableRow, TableCell
from odf.text import P
class ODSReader(object):
    """Minimal OpenDocument spreadsheet (.ods) reader.

    Every sheet of the document is parsed at construction time and
    exposed through :attr:`SHEETS`, a dict mapping each sheet name to
    its rows, a row being a list of cell text contents (strings).
    """

    def __init__(self, file=None, content=None, clonespannedcolumns=None):
        """Load the document and parse all of its sheets.

        :param file: path or file object given to ``opendocument.load``,
            used when *content* is not provided
        :param content: an already-loaded opendocument document
        :param clonespannedcolumns: when not ``None``, horizontally
            spanned cells are duplicated over the columns they cover
        """
        self.clonespannedcolumns = clonespannedcolumns
        if not content:
            self.doc = opendocument.load(file)
        else:
            self.doc = content
        self.SHEETS = {}
        for sheet in self.doc.spreadsheet.getElementsByType(Table):
            self.readSheet(sheet)

    # reads a sheet in the sheet dictionary, storing each sheet as an
    # array (rows) of arrays (columns)
    def readSheet(self, sheet):
        """Parse *sheet* and store its rows in :attr:`SHEETS` under its name.

        Cells whose text starts with ``#`` are treated as comments and
        contribute nothing; fully empty rows are dropped.
        """
        name = sheet.getAttribute("name")
        rows = sheet.getElementsByType(TableRow)
        arrRows = []

        # for each row
        for row in rows:
            arrCells = []
            cells = row.getElementsByType(TableCell)

            # for each cell
            for count, cell in enumerate(cells, start=1):
                # number-columns-repeated collapses runs of identical cells;
                # it is ignored on the last cell of the row to avoid
                # emitting trailing padding
                repeat = 0
                if count != len(cells):
                    repeat = cell.getAttribute("numbercolumnsrepeated")
                if not repeat:
                    repeat = 1
                spanned = int(cell.getAttribute('numbercolumnsspanned') or 0)
                # clone spanned cells
                if self.clonespannedcolumns is not None and spanned > 1:
                    repeat = spanned

                ps = cell.getElementsByType(P)
                textContent = u""

                # for each text/text:span node
                for p in ps:
                    for n in p.childNodes:
                        if n.nodeType == 1 and n.tagName == "text:span":
                            for c in n.childNodes:
                                if c.nodeType == 3:
                                    # bugfix: concatenate the text node's own
                                    # content (c.data) — the original appended
                                    # n.data, i.e. the span element itself
                                    textContent = u'{}{}'.format(textContent, c.data)
                        if n.nodeType == 3:
                            textContent = u'{}{}'.format(textContent, n.data)

                if textContent:
                    if not textContent.startswith("#"):  # ignore comments cells
                        for rr in range(int(repeat)):  # repeated?
                            arrCells.append(textContent)
                else:
                    for rr in range(int(repeat)):
                        arrCells.append("")

            # if row contained something
            if arrCells:
                arrRows.append(arrCells)

        self.SHEETS[name] = arrRows

    # returns a sheet as an array (rows) of arrays (columns)
    def getSheet(self, name):
        """Return the rows of the sheet called *name* (KeyError if absent)."""
        return self.SHEETS[name]

    def getFirstSheet(self):
        """Return the rows of the document's first sheet."""
        return next(iter(self.SHEETS.values()))
| 36.530612 | 3,580 |
3,272 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
from odoo import fields, models
def model(suffix_name):
    """Return the fully-qualified name of a base_import test model."""
    return f'base_import.tests.models.{suffix_name}'
class Char(models.Model):
    """Test model exposing a single unconstrained Char field."""
    _name = model('char')
    _description = 'Tests : Base Import Model, Character'

    value = fields.Char()
class CharRequired(models.Model):
    """Test model with a required Char field."""
    _name = model('char.required')
    _description = 'Tests : Base Import Model, Character required'

    value = fields.Char(required=True)
class CharReadonly(models.Model):
    """Test model with an unconditionally readonly Char field."""
    _name = model('char.readonly')
    _description = 'Tests : Base Import Model, Character readonly'

    value = fields.Char(readonly=True)
class CharStates(models.Model):
    """Test model whose readonly Char becomes writable in state 'draft'."""
    _name = model('char.states')
    _description = 'Tests : Base Import Model, Character states'

    value = fields.Char(readonly=True, states={'draft': [('readonly', False)]})
class CharNoreadonly(models.Model):
    """Test model with a readonly Char whose states only toggle visibility,
    never the readonly flag."""
    _name = model('char.noreadonly')
    _description = 'Tests : Base Import Model, Character No readonly'

    value = fields.Char(readonly=True, states={'draft': [('invisible', True)]})
class CharStillreadonly(models.Model):
    """Test model with a Char that stays readonly even in state 'draft'."""
    _name = model('char.stillreadonly')
    _description = 'Tests : Base Import Model, Character still readonly'

    value = fields.Char(readonly=True, states={'draft': [('readonly', True)]})
# TODO: complex field (m2m, o2m, m2o)
class M2o(models.Model):
    """Test model with a plain many2one to ``m2o.related``."""
    _name = model('m2o')
    _description = 'Tests : Base Import Model, Many to One'

    value = fields.Many2one(model('m2o.related'))
class M2oRelated(models.Model):
    """Comodel of :class:`M2o`; its integer value defaults to 42."""
    _name = model('m2o.related')
    _description = 'Tests : Base Import Model, Many to One related'

    value = fields.Integer(default=42)
class M2oRequired(models.Model):
    """Test model with a required many2one to ``m2o.required.related``."""
    _name = model('m2o.required')
    _description = 'Tests : Base Import Model, Many to One required'

    value = fields.Many2one(model('m2o.required.related'), required=True)
class M2oRequiredRelated(models.Model):
    """Comodel of :class:`M2oRequired`; its integer value defaults to 42."""
    _name = model('m2o.required.related')
    _description = 'Tests : Base Import Model, Many to One required related'

    value = fields.Integer(default=42)
class O2m(models.Model):
    """Test model owning a one2many of ``o2m.child`` via ``parent_id``."""
    _name = model('o2m')
    _description = 'Tests : Base Import Model, One to Many'

    name = fields.Char()
    value = fields.One2many(model('o2m.child'), 'parent_id')
class O2mChild(models.Model):
    """Child side of the :class:`O2m` one2many relation."""
    _name = model('o2m.child')
    _description = 'Tests : Base Import Model, One to Many child'

    parent_id = fields.Many2one(model('o2m'))
    value = fields.Integer()
class PreviewModel(models.Model):
    """Test model used for the import preview: a name plus one required
    and one optional integer."""
    _name = model('preview')
    _description = 'Tests : Base Import Model Preview'

    name = fields.Char('Name')
    somevalue = fields.Integer(string='Some Value', required=True)
    othervalue = fields.Integer(string='Other Variable')
class FloatModel(models.Model):
    """Test model for float/monetary imports, with the currency field
    monetary fields require."""
    _name = model('float')
    _description = 'Tests: Base Import Model Float'

    value = fields.Float()
    value2 = fields.Monetary()
    currency_id = fields.Many2one('res.currency')
class ComplexModel(models.Model):
    """Test model mixing float, monetary, char, date and datetime fields."""
    _name = model('complex')
    _description = 'Tests: Base Import Model Complex'

    f = fields.Float()
    m = fields.Monetary()
    c = fields.Char()
    currency_id = fields.Many2one('res.currency')
    d = fields.Date()
    dt = fields.Datetime()
| 30.296296 | 3,272 |
694 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
from odoo import http
from odoo.http import request
from odoo.tools import misc
class ImportController(http.Controller):

    @http.route('/base_import/set_file', methods=['POST'])
    def set_file(self, file, import_id, jsonp='callback'):
        """Attach the uploaded *file* to an existing ``base_import.import``
        record (its raw bytes, original name and content type).

        Responds with a JS snippet calling the *jsonp* handler on the
        parent frame with ``{'result': written}``; the callback name is
        HTML-escaped to keep it injection-safe.
        """
        import_id = int(import_id)

        written = request.env['base_import.import'].browse(import_id).write({
            'file': file.read(),
            'file_name': file.filename,
            'file_type': file.content_type,
        })

        return 'window.top.%s(%s)' % (misc.html_escape(jsonp), json.dumps({'result': written}))
| 30.173913 | 694 |
519 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Odoo module manifest for the CRM gamification bridge module.
{
    'name': 'CRM Gamification',
    'version': '1.0',
    'category': 'Sales/CRM',
    # gamification provides goals/challenges, sale_crm the CRM sales flows
    'depends': ['gamification', 'sale_crm'],
    'description': """Example of goal definitions and challenges that can be used related to the usage of the CRM Sale module.""",
    # data loaded on install; demo records only in demo databases
    'data': ['data/gamification_sale_crm_data.xml'],
    'demo': ['data/gamification_sale_crm_demo.xml'],
    # bridge module: installed automatically once all dependencies are present
    'auto_install': True,
    'license': 'LGPL-3',
}
| 39.923077 | 519 |
739 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Odoo module manifest for the ESC/POS printer/cashdrawer hardware driver.
{
    'name': 'ESC/POS Hardware Driver',
    'category': 'Sales/Point of Sale',
    'sequence': 6,
    'website': 'https://www.odoo.com/app/point-of-sale-hardware',
    'summary': 'Hardware Driver for ESC/POS Printers and Cashdrawers',
    'description': """
ESC/POS Hardware Driver
=======================
This module allows Odoo to print with ESC/POS compatible printers and
to open ESC/POS controlled cashdrawers in the point of sale and other modules
that would need such functionality.
""",
    # python packages required on the machine actually driving the hardware
    'external_dependencies': {
        'python' : ['pyusb','pyserial','qrcode'],
    },
    # module cannot be installed in this version
    'installable': False,
    'license': 'LGPL-3',
}
| 30.791667 | 739 |
32,683 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import base64
import copy
import io
import math
import re
import traceback
import codecs
from hashlib import md5
from PIL import Image
from xml.etree import ElementTree as ET
try:
import jcconv
except ImportError:
jcconv = None
try:
import qrcode
except ImportError:
qrcode = None
from .constants import *
from .exceptions import *
def utfstr(stuff):
    """Coerce *stuff* to ``str``; str instances pass through unchanged."""
    return stuff if isinstance(stuff, str) else str(stuff)
class StyleStack:
    """
    The stylestack is used by the xml receipt serializer to compute the active styles along the xml
    document. Styles are just xml attributes, there is no css mechanism. But the style applied by
    the attributes are inherited by deeper nodes.
    """
    def __init__(self):
        self.stack = []
        self.defaults = { # default style values
            'align': 'left',
            'underline': 'off',
            'bold': 'off',
            'size': 'normal',
            'font' : 'a',
            'width': 48,
            'indent': 0,
            'tabwidth': 2,
            'bullet': ' - ',
            'line-ratio':0.5,
            'color': 'black',
            'value-decimals': 2,
            'value-symbol': '',
            'value-symbol-position': 'after',
            'value-autoint': 'off',
            'value-decimals-separator': '.',
            'value-thousands-separator': ',',
            'value-width': 0,
        }
        self.types = { # attribute types, default is string and can be omitted
            'width': 'int',
            'indent': 'int',
            'tabwidth': 'int',
            'line-ratio': 'float',
            'value-decimals': 'int',
            'value-width': 'int',
        }
        self.cmds = {
            # translation from styles to escpos commands
            # some style do not correspond to escpos command are used by
            # the serializer instead
            'align': {
                'left': TXT_ALIGN_LT,
                'right': TXT_ALIGN_RT,
                'center': TXT_ALIGN_CT,
                '_order': 1,
            },
            'underline': {
                'off': TXT_UNDERL_OFF,
                'on': TXT_UNDERL_ON,
                'double': TXT_UNDERL2_ON,
                # must be issued after 'size' command
                # because ESC ! resets ESC -
                '_order': 10,
            },
            'bold': {
                'off': TXT_BOLD_OFF,
                'on': TXT_BOLD_ON,
                # must be issued after 'size' command
                # because ESC ! resets ESC -
                '_order': 10,
            },
            'font': {
                'a': TXT_FONT_A,
                'b': TXT_FONT_B,
                # must be issued after 'size' command
                # because ESC ! resets ESC -
                '_order': 10,
            },
            'size': {
                'normal': TXT_NORMAL,
                'double-height': TXT_2HEIGHT,
                'double-width': TXT_2WIDTH,
                'double': TXT_DOUBLE,
                '_order': 1,
            },
            'color': {
                'black': TXT_COLOR_BLACK,
                'red': TXT_COLOR_RED,
                '_order': 1,
            },
        }
        self.push(self.defaults)

    def get(self, style):
        """Return the value of *style* at the current stack level (or None)."""
        level = len(self.stack) - 1
        while level >= 0:
            if style in self.stack[level]:
                return self.stack[level][style]
            else:
                level = level - 1
        return None

    def enforce_type(self, attr, val):
        """Convert *val* to the declared type of attribute *attr* (default: str)."""
        if not attr in self.types:
            return utfstr(val)
        elif self.types[attr] == 'int':
            return int(float(val))
        elif self.types[attr] == 'float':
            return float(val)
        else:
            return utfstr(val)

    def push(self, style=None):
        """Push a new level on the stack with a style dictionary of style:value pairs.

        BUG FIX: the default used to be the mutable literal ``style={}``;
        use None to avoid sharing a dict across calls.
        """
        style = style or {}
        _style = {}
        for attr in style:
            if attr in self.cmds and not style[attr] in self.cmds[attr]:
                print('WARNING: ESC/POS PRINTING: ignoring invalid value: %s for style %s' % (style[attr], utfstr(attr)))
            else:
                _style[attr] = self.enforce_type(attr, style[attr])
        self.stack.append(_style)

    def set(self, style=None):
        """Override style values at the current stack level.

        BUG FIX: mutable default argument replaced by None (see push()).
        """
        style = style or {}
        for attr in style:
            if attr in self.cmds and not style[attr] in self.cmds[attr]:
                print('WARNING: ESC/POS PRINTING: ignoring invalid value: %s for style %s' % (style[attr], attr))
            else:
                self.stack[-1][attr] = self.enforce_type(attr, style[attr])

    def pop(self):
        """Pop a style stack level (the bottom level is never removed)."""
        if len(self.stack) > 1:
            self.stack = self.stack[:-1]

    def to_escpos(self):
        """Convert the current style to an escpos command string.

        Commands are emitted in '_order' order because ESC ! ('size')
        resets bold/underline/font and must come first.
        """
        cmd = ''
        ordered_cmds = sorted(self.cmds, key=lambda x: self.cmds[x]['_order'])
        for style in ordered_cmds:
            cmd += self.cmds[style][self.get(style)]
        return cmd
class XmlSerializer:
    """
    Serializes the xml inline / block tree structure to the escpos driver,
    emitting content as soon as possible while tracking newlines and spacing.
    """
    def __init__(self, escpos):
        self.escpos = escpos
        self.stack = ['block']
        self.dirty = False

    def start_inline(self, stylestack=None):
        """Open an inline entity, optionally applying a style."""
        self.stack.append('inline')
        if self.dirty:
            self.escpos._raw(' ')
        if stylestack:
            self.style(stylestack)

    def start_block(self, stylestack=None):
        """Open a block entity, optionally applying a style."""
        if self.dirty:
            self.escpos._raw('\n')
            self.dirty = False
        self.stack.append('block')
        if stylestack:
            self.style(stylestack)

    def end_entity(self):
        """Close the current entity (the active style is NOT reverted)."""
        if self.stack[-1] == 'block' and self.dirty:
            self.escpos._raw('\n')
            self.dirty = False
        if len(self.stack) > 1:
            self.stack.pop()

    def pre(self, text):
        """Emit *text* verbatim, keeping its whitespace intact."""
        if text:
            self.escpos.text(text)
            self.dirty = True

    def text(self, text):
        """Emit *text* with whitespace runs collapsed to single spaces."""
        if not text:
            return
        normalized = text if isinstance(text, str) else str(text)
        normalized = re.sub(r'\s+', ' ', normalized.strip())
        if normalized:
            self.dirty = True
            self.escpos.text(normalized)

    def linebreak(self):
        """Insert a line break in the entity."""
        self.dirty = False
        self.escpos._raw('\n')

    def style(self, stylestack):
        """Apply a style (only affects content emitted afterwards)."""
        self.raw(stylestack.to_escpos())

    def raw(self, raw):
        """Send raw text/escpos commands without touching the serializer state."""
        self.escpos._raw(raw)
class XmlLineSerializer:
    """
    Renders an xml tree into a single fixed-width line split into a left and a
    right part. Nothing is sent to the printer directly: the rendered line is
    meant to be fed back to the XmlSerializer as preformatted block content.
    """
    def __init__(self, indent=0, tabwidth=2, width=48, ratio=0.5):
        self.tabwidth = tabwidth
        self.indent = indent
        self.width = max(0, width - int(tabwidth * indent))
        self.lwidth = int(self.width * ratio)
        self.rwidth = max(0, self.width - self.lwidth)
        self.clwidth = 0
        self.crwidth = 0
        self.lbuffer = ''
        self.rbuffer = ''
        self.left = True

    def _txt(self, txt):
        # append to the active side, truncating at that side's column budget
        if self.left:
            if self.clwidth < self.lwidth:
                chunk = txt[:max(0, self.lwidth - self.clwidth)]
                self.lbuffer += chunk
                self.clwidth += len(chunk)
        elif self.crwidth < self.rwidth:
            chunk = txt[:max(0, self.rwidth - self.crwidth)]
            self.rbuffer += chunk
            self.crwidth += len(chunk)

    def start_inline(self, stylestack=None):
        # separate entities on the same side with a single space
        if (self.left and self.clwidth) or (not self.left and self.crwidth):
            self._txt(' ')

    def start_block(self, stylestack=None):
        self.start_inline(stylestack)

    def end_entity(self):
        pass

    def pre(self, text):
        if text:
            self._txt(text)

    def text(self, text):
        if not text:
            return
        normalized = text if isinstance(text, str) else str(text)
        normalized = re.sub(r'\s+', ' ', normalized.strip())
        if normalized:
            self._txt(normalized)

    def linebreak(self):
        pass

    def style(self, stylestack):
        pass

    def raw(self, raw):
        pass

    def start_right(self):
        """Switch output to the right-hand part of the line."""
        self.left = False

    def get_line(self):
        """Return the assembled line: indent + left part + padding + right part."""
        padding = ' ' * (self.width - self.clwidth - self.crwidth)
        return ' ' * self.indent * self.tabwidth + self.lbuffer + padding + self.rbuffer
class Escpos:
""" ESC/POS Printer object """
device = None
encoding = None
img_cache = {}
def _check_image_size(self, size):
""" Check and fix the size of the image to 32 bits """
if size % 32 == 0:
return (0, 0)
else:
image_border = 32 - (size % 32)
if (image_border % 2) == 0:
return (int(image_border / 2), int(image_border / 2))
else:
return (int(image_border / 2), int((image_border / 2) + 1))
    def _print_image(self, line, size):
        """ Print formatted image

        :param line: row-major string of '0'/'1' pixel bits, as produced by
            _convert_image()
        :param size: [accumulated_width, height] as produced by
            _convert_image() -- size[0] is width*height, so
            (size[0]/size[1])/8 below yields bytes per row
        """
        i = 0
        cont = 0
        buffer = ""
        self._raw(S_RASTER_N)
        # raster header: bytes-per-row, 0, height, 0 (hex-encoded, then decoded to bytes)
        buffer = b"%02X%02X%02X%02X" % (int((size[0]/size[1])/8), 0, size[1], 0)
        self._raw(codecs.decode(buffer, 'hex'))
        buffer = ""
        while i < len(line):
            # pack 8 pixel bits into one hex-encoded byte
            hex_string = int(line[i:i+8],2)
            buffer += "%02X" % hex_string
            i += 8
            cont += 1
            # flush to the device every 4 bytes
            if cont % 4 == 0:
                self._raw(codecs.decode(buffer, "hex"))
                buffer = ""
                cont = 0
def _raw_print_image(self, line, size, output=None ):
""" Print formatted image """
i = 0
cont = 0
buffer = ""
raw = b""
def __raw(string):
if output:
output(string)
else:
self._raw(string)
raw += S_RASTER_N.encode('utf-8')
buffer = "%02X%02X%02X%02X" % (int((size[0]/size[1])/8), 0, size[1], 0)
raw += codecs.decode(buffer, 'hex')
buffer = ""
while i < len(line):
hex_string = int(line[i:i+8],2)
buffer += "%02X" % hex_string
i += 8
cont += 1
if cont % 4 == 0:
raw += codecs.decode(buffer, 'hex')
buffer = ""
cont = 0
return raw
    def _convert_image(self, im):
        """ Parse image and prepare it to a printable format

        Converts an RGB PIL-style image (anything exposing .size and
        .getpixel) to a string of '0'/'1' pixel bits using a 3-level
        threshold: dark -> '1', light -> '0', mid tones -> alternating
        0/1 (the `switch` toggle), which approximates a checkerboard dither.

        :return: (pix_line, img_size) where img_size is
            [width*height accumulated over all rows, height] -- consumed
            as-is by _print_image()/_raw_print_image()
        :raises ImageSizeError: when the image is taller than 255 px
        """
        pixels = []  # NOTE(review): unused leftover
        pix_line = ""
        im_left = ""
        im_right = ""
        switch = 0
        img_size = [ 0, 0 ]
        if im.size[0] > 512:
            print("WARNING: Image is wider than 512 and could be truncated at print time ")
        if im.size[1] > 255:
            raise ImageSizeError()
        # zero-pad each row so its bit width is a multiple of 32
        im_border = self._check_image_size(im.size[0])
        for i in range(im_border[0]):
            im_left += "0"
        for i in range(im_border[1]):
            im_right += "0"
        for y in range(im.size[1]):
            img_size[1] += 1
            pix_line += im_left
            img_size[0] += im_border[0]
            for x in range(im.size[0]):
                img_size[0] += 1
                RGB = im.getpixel((x, y))
                im_color = (RGB[0] + RGB[1] + RGB[2])
                # brightness buckets: '1' dark, 'X' mid (alternating), '0' light
                im_pattern = "1X0"
                pattern_len = len(im_pattern)
                switch = (switch - 1 ) * (-1)
                # NOTE: this inner loop variable shadows the pixel `x` above
                for x in range(pattern_len):
                    if im_color <= (255 * 3 / pattern_len * (x+1)):
                        if im_pattern[x] == "X":
                            pix_line += "%d" % switch
                        else:
                            pix_line += im_pattern[x]
                        break
                    elif im_color > (255 * 3 / pattern_len * pattern_len) and im_color <= (255 * 3):
                        pix_line += im_pattern[-1]
                        break
            pix_line += im_right
            img_size[0] += im_border[1]
        return (pix_line, img_size)
def image(self,path_img):
""" Open image file """
im_open = Image.open(path_img)
im = im_open.convert("RGB")
# Convert the RGB image in printable image
pix_line, img_size = self._convert_image(im)
self._print_image(pix_line, img_size)
def print_base64_image(self,img):
print('print_b64_img')
id = md5(img).digest()
if id not in self.img_cache:
print('not in cache')
img = img[img.find(b',')+1:]
f = io.BytesIO(b'img')
f.write(base64.decodebytes(img))
f.seek(0)
img_rgba = Image.open(f)
img = Image.new('RGB', img_rgba.size, (255,255,255))
channels = img_rgba.split()
if len(channels) > 3:
# use alpha channel as mask
img.paste(img_rgba, mask=channels[3])
else:
img.paste(img_rgba)
print('convert image')
pix_line, img_size = self._convert_image(img)
print('print image')
buffer = self._raw_print_image(pix_line, img_size)
self.img_cache[id] = buffer
print('raw image')
self._raw(self.img_cache[id])
    def qr(self,text):
        """ Print QR Code for the provided string

        NOTE(review): the converted pixel data returned by _convert_image()
        is discarded and nothing is sent to the device -- as written this
        method prints nothing; confirm whether a _print_image() call is
        missing.
        """
        qr_code = qrcode.QRCode(version=4, box_size=4, border=1)
        qr_code.add_data(text)
        qr_code.make(fit=True)
        qr_img = qr_code.make_image()
        im = qr_img._img.convert("RGB")
        # Convert the RGB image in printable image
        self._convert_image(im)
def barcode(self, code, bc, width=255, height=2, pos='below', font='a'):
""" Print Barcode """
# Align Bar Code()
self._raw(TXT_ALIGN_CT)
# Height
if height >=2 or height <=6:
self._raw(BARCODE_HEIGHT)
else:
raise BarcodeSizeError()
# Width
if width >= 1 or width <=255:
self._raw(BARCODE_WIDTH)
else:
raise BarcodeSizeError()
# Font
if font.upper() == "B":
self._raw(BARCODE_FONT_B)
else: # DEFAULT FONT: A
self._raw(BARCODE_FONT_A)
# Position
if pos.upper() == "OFF":
self._raw(BARCODE_TXT_OFF)
elif pos.upper() == "BOTH":
self._raw(BARCODE_TXT_BTH)
elif pos.upper() == "ABOVE":
self._raw(BARCODE_TXT_ABV)
else: # DEFAULT POSITION: BELOW
self._raw(BARCODE_TXT_BLW)
# Type
if bc.upper() == "UPC-A":
self._raw(BARCODE_UPC_A)
elif bc.upper() == "UPC-E":
self._raw(BARCODE_UPC_E)
elif bc.upper() == "EAN13":
self._raw(BARCODE_EAN13)
elif bc.upper() == "EAN8":
self._raw(BARCODE_EAN8)
elif bc.upper() == "CODE39":
self._raw(BARCODE_CODE39)
elif bc.upper() == "ITF":
self._raw(BARCODE_ITF)
elif bc.upper() == "NW7":
self._raw(BARCODE_NW7)
else:
raise BarcodeTypeError()
# Print Code
if code:
self._raw(code)
# We are using type A commands
# So we need to add the 'NULL' character
# https://github.com/python-escpos/python-escpos/pull/98/files#diff-a0b1df12c7c67e38915adbe469051e2dR444
self._raw('\x00')
else:
raise BarcodeCodeError()
def receipt(self,xml):
"""
Prints an xml based receipt definition
"""
def strclean(string):
if not string:
string = ''
string = string.strip()
string = re.sub('\s+',' ',string)
return string
def format_value(value, decimals=3, width=0, decimals_separator='.', thousands_separator=',', autoint=False, symbol='', position='after'):
decimals = max(0,int(decimals))
width = max(0,int(width))
value = float(value)
if autoint and math.floor(value) == value:
decimals = 0
if width == 0:
width = ''
if thousands_separator:
formatstr = "{:"+str(width)+",."+str(decimals)+"f}"
else:
formatstr = "{:"+str(width)+"."+str(decimals)+"f}"
ret = formatstr.format(value)
ret = ret.replace(',','COMMA')
ret = ret.replace('.','DOT')
ret = ret.replace('COMMA',thousands_separator)
ret = ret.replace('DOT',decimals_separator)
if symbol:
if position == 'after':
ret = ret + symbol
else:
ret = symbol + ret
return ret
def print_elem(stylestack, serializer, elem, indent=0):
elem_styles = {
'h1': {'bold': 'on', 'size':'double'},
'h2': {'size':'double'},
'h3': {'bold': 'on', 'size':'double-height'},
'h4': {'size': 'double-height'},
'h5': {'bold': 'on'},
'em': {'font': 'b'},
'b': {'bold': 'on'},
}
stylestack.push()
if elem.tag in elem_styles:
stylestack.set(elem_styles[elem.tag])
stylestack.set(elem.attrib)
if elem.tag in ('p','div','section','article','receipt','header','footer','li','h1','h2','h3','h4','h5'):
serializer.start_block(stylestack)
serializer.text(elem.text)
for child in elem:
print_elem(stylestack,serializer,child)
serializer.start_inline(stylestack)
serializer.text(child.tail)
serializer.end_entity()
serializer.end_entity()
elif elem.tag in ('span','em','b','left','right'):
serializer.start_inline(stylestack)
serializer.text(elem.text)
for child in elem:
print_elem(stylestack,serializer,child)
serializer.start_inline(stylestack)
serializer.text(child.tail)
serializer.end_entity()
serializer.end_entity()
elif elem.tag == 'value':
serializer.start_inline(stylestack)
serializer.pre(format_value(
elem.text,
decimals=stylestack.get('value-decimals'),
width=stylestack.get('value-width'),
decimals_separator=stylestack.get('value-decimals-separator'),
thousands_separator=stylestack.get('value-thousands-separator'),
autoint=(stylestack.get('value-autoint') == 'on'),
symbol=stylestack.get('value-symbol'),
position=stylestack.get('value-symbol-position')
))
serializer.end_entity()
elif elem.tag == 'line':
width = stylestack.get('width')
if stylestack.get('size') in ('double', 'double-width'):
width = width / 2
lineserializer = XmlLineSerializer(stylestack.get('indent')+indent,stylestack.get('tabwidth'),width,stylestack.get('line-ratio'))
serializer.start_block(stylestack)
for child in elem:
if child.tag == 'left':
print_elem(stylestack,lineserializer,child,indent=indent)
elif child.tag == 'right':
lineserializer.start_right()
print_elem(stylestack,lineserializer,child,indent=indent)
serializer.pre(lineserializer.get_line())
serializer.end_entity()
elif elem.tag == 'ul':
serializer.start_block(stylestack)
bullet = stylestack.get('bullet')
for child in elem:
if child.tag == 'li':
serializer.style(stylestack)
serializer.raw(' ' * indent * stylestack.get('tabwidth') + bullet)
print_elem(stylestack,serializer,child,indent=indent+1)
serializer.end_entity()
elif elem.tag == 'ol':
cwidth = len(str(len(elem))) + 2
i = 1
serializer.start_block(stylestack)
for child in elem:
if child.tag == 'li':
serializer.style(stylestack)
serializer.raw(' ' * indent * stylestack.get('tabwidth') + ' ' + (str(i)+')').ljust(cwidth))
i = i + 1
print_elem(stylestack,serializer,child,indent=indent+1)
serializer.end_entity()
elif elem.tag == 'pre':
serializer.start_block(stylestack)
serializer.pre(elem.text)
serializer.end_entity()
elif elem.tag == 'hr':
width = stylestack.get('width')
if stylestack.get('size') in ('double', 'double-width'):
width = width / 2
serializer.start_block(stylestack)
serializer.text('-'*width)
serializer.end_entity()
elif elem.tag == 'br':
serializer.linebreak()
elif elem.tag == 'img':
if 'src' in elem.attrib and 'data:' in elem.attrib['src']:
self.print_base64_image(bytes(elem.attrib['src'], 'utf-8'))
elif elem.tag == 'barcode' and 'encoding' in elem.attrib:
serializer.start_block(stylestack)
self.barcode(strclean(elem.text),elem.attrib['encoding'])
serializer.end_entity()
elif elem.tag == 'cut':
self.cut()
elif elem.tag == 'partialcut':
self.cut(mode='part')
elif elem.tag == 'cashdraw':
self.cashdraw(2)
self.cashdraw(5)
stylestack.pop()
try:
stylestack = StyleStack()
serializer = XmlSerializer(self)
root = ET.fromstring(xml.encode('utf-8'))
self._raw(stylestack.to_escpos())
print_elem(stylestack,serializer,root)
if 'open-cashdrawer' in root.attrib and root.attrib['open-cashdrawer'] == 'true':
self.cashdraw(2)
self.cashdraw(5)
if not 'cut' in root.attrib or root.attrib['cut'] == 'true' :
self.cut()
except Exception as e:
errmsg = str(e)+'\n'+'-'*48+'\n'+traceback.format_exc() + '-'*48+'\n'
self.text(errmsg)
self.cut()
raise e
    def text(self,txt):
        """ Print Utf8 encoded alpha-numeric text

        Each character is re-encoded to a printer code page, emitting the
        escpos code-page-switch sequence whenever the page changes. The last
        page used is remembered on self.encoding across calls.
        """
        if not txt:
            return
        # NOTE(review): py2 leftover -- on Python 3 a str has no .decode and
        # the bare except just passes through; only bytes input is decoded
        try:
            txt = txt.decode('utf-8')
        except:
            try:
                txt = txt.decode('utf-16')
            except:
                pass
        # count of extra characters introduced by katakana conversion,
        # compensated for at the end by removing double spaces
        self.extra_chars = 0
        def encode_char(char):
            """
            Encodes a single utf-8 character into a sequence of
            esc-pos code page change instructions and character declarations
            """
            char_utf8 = char.encode('utf-8')
            encoded = ''
            encoding = self.encoding # we reuse the last encoding to prevent code page switches at every character
            encodings = {
                    # TODO use ordering to prevent useless switches
                    # TODO Support other encodings not natively supported by python ( Thai, Khazakh, Kanjis )
                    'cp437': TXT_ENC_PC437,
                    'cp850': TXT_ENC_PC850,
                    'cp852': TXT_ENC_PC852,
                    'cp857': TXT_ENC_PC857,
                    'cp858': TXT_ENC_PC858,
                    'cp860': TXT_ENC_PC860,
                    'cp863': TXT_ENC_PC863,
                    'cp865': TXT_ENC_PC865,
                    'cp1251': TXT_ENC_WPC1251,    # win-1251 covers more cyrillic symbols than cp866
                    'cp866': TXT_ENC_PC866,
                    'cp862': TXT_ENC_PC862,
                    'cp720': TXT_ENC_PC720,
                    'cp936': TXT_ENC_PC936,
                    'iso8859_2': TXT_ENC_8859_2,
                    'iso8859_7': TXT_ENC_8859_7,
                    'iso8859_9': TXT_ENC_8859_9,
                    'cp1254' : TXT_ENC_WPC1254,
                    'cp1255' : TXT_ENC_WPC1255,
                    'cp1256' : TXT_ENC_WPC1256,
                    'cp1257' : TXT_ENC_WPC1257,
                    'cp1258' : TXT_ENC_WPC1258,
                    'katakana' : TXT_ENC_KATAKANA,
                }
            remaining = copy.copy(encodings)
            if not encoding :
                encoding = 'cp437'
            while True: # Trying all encoding until one succeeds
                try:
                    if encoding == 'katakana': # Japanese characters
                        if jcconv:
                            # try to convert japanese text to a half-katakanas
                            kata = jcconv.kata2half(jcconv.hira2kata(char_utf8))
                            if kata != char_utf8:
                                self.extra_chars += len(kata.decode('utf-8')) - 1
                                # the conversion may result in multiple characters
                                return encode_str(kata.decode('utf-8'))
                        else:
                             kata = char_utf8
                        if kata in TXT_ENC_KATAKANA_MAP:
                            encoded = TXT_ENC_KATAKANA_MAP[kata]
                            break
                        else:
                            raise ValueError()
                    else:
                        # First 127 symbols are covered by cp437.
                        # Extended range is covered by different encodings.
                        encoded = char.encode(encoding)
                        if ord(encoded) <= 127:
                            encoding = 'cp437'
                        break
                except (UnicodeEncodeError, UnicodeWarning, TypeError, ValueError):
                    #the encoding failed, select another one and retry
                    if encoding in remaining:
                        del remaining[encoding]
                    if len(remaining) >= 1:
                        (encoding, _) = remaining.popitem()
                    else:
                        encoding = 'cp437'
                        encoded = b'\xb1'    # could not encode, output error character
                        break;
            if encoding != self.encoding:
                # if the encoding changed, remember it and prefix the character with
                # the esc-pos encoding change sequence
                self.encoding = encoding
                encoded = bytes(encodings[encoding], 'utf-8') + encoded
            return encoded
        def encode_str(txt):
            # encode a whole string one character at a time
            buffer = b''
            for c in txt:
                buffer += encode_char(c)
            return buffer
        txt = encode_str(txt)
        # if the utf-8 -> codepage conversion inserted extra characters,
        # remove double spaces to try to restore the original string length
        # and prevent printing alignment issues
        # NOTE(review): txt is bytes here, so find(' ') with a str pattern
        # would raise on Python 3; only reachable when extra_chars > 0
        # (katakana path) -- confirm
        while self.extra_chars > 0:
            dspace = txt.find(' ')
            if dspace > 0:
                txt = txt[:dspace] + txt[dspace+1:]
                self.extra_chars -= 1
            else:
                break
        self._raw(txt)
def set(self, align='left', font='a', type='normal', width=1, height=1):
""" Set text properties """
# Align
if align.upper() == "CENTER":
self._raw(TXT_ALIGN_CT)
elif align.upper() == "RIGHT":
self._raw(TXT_ALIGN_RT)
elif align.upper() == "LEFT":
self._raw(TXT_ALIGN_LT)
# Font
if font.upper() == "B":
self._raw(TXT_FONT_B)
else: # DEFAULT FONT: A
self._raw(TXT_FONT_A)
# Type
if type.upper() == "B":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_OFF)
elif type.upper() == "U":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "U2":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL2_ON)
elif type.upper() == "BU":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "BU2":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL2_ON)
elif type.upper == "NORMAL":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_OFF)
# Width
if width == 2 and height != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2WIDTH)
elif height == 2 and width != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2HEIGHT)
elif height == 2 and width == 2:
self._raw(TXT_2WIDTH)
self._raw(TXT_2HEIGHT)
else: # DEFAULT SIZE: NORMAL
self._raw(TXT_NORMAL)
def cut(self, mode=''):
""" Cut paper """
# Fix the size between last line and cut
# TODO: handle this with a line feed
self._raw("\n\n\n\n\n\n")
if mode.upper() == "PART":
self._raw(PAPER_PART_CUT)
else: # DEFAULT MODE: FULL CUT
self._raw(PAPER_FULL_CUT)
def cashdraw(self, pin):
""" Send pulse to kick the cash drawer
For some reason, with some printers (ex: Epson TM-m30), the cash drawer
only opens 50% of the time if you just send the pulse. But if you read
the status afterwards, it opens all the time.
"""
if pin == 2:
self._raw(CD_KICK_2)
elif pin == 5:
self._raw(CD_KICK_5)
else:
raise CashDrawerError()
self.get_printer_status()
def hw(self, hw):
""" Hardware operations """
if hw.upper() == "INIT":
self._raw(HW_INIT)
elif hw.upper() == "SELECT":
self._raw(HW_SELECT)
elif hw.upper() == "RESET":
self._raw(HW_RESET)
else: # DEFAULT: DOES NOTHING
pass
def control(self, ctl):
""" Feed control sequences """
if ctl.upper() == "LF":
self._raw(CTL_LF)
elif ctl.upper() == "FF":
self._raw(CTL_FF)
elif ctl.upper() == "CR":
self._raw(CTL_CR)
elif ctl.upper() == "HT":
self._raw(CTL_HT)
elif ctl.upper() == "VT":
self._raw(CTL_VT)
| 34.95508 | 32,683 |
7,646 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
""" ESC/POS Commands (Constants) """
# Control characters
ESC = '\x1b'
# Feed control sequences
CTL_LF = '\x0a' # Print and line feed
CTL_FF = '\x0c' # Form feed
CTL_CR = '\x0d' # Carriage return
CTL_HT = '\x09' # Horizontal tab
CTL_VT = '\x0b' # Vertical tab
# RT Status commands
DLE_EOT_PRINTER = '\x10\x04\x01' # Transmit printer status
DLE_EOT_OFFLINE = '\x10\x04\x02'
DLE_EOT_ERROR = '\x10\x04\x03'
DLE_EOT_PAPER = '\x10\x04\x04'
# Printer hardware
HW_INIT = '\x1b\x40' # Clear data in buffer and reset modes
HW_SELECT = '\x1b\x3d\x01' # Printer select
HW_RESET = '\x1b\x3f\x0a\x00' # Reset printer hardware
# Cash Drawer (ESC p <pin> <on time: 2*ms> <off time: 2*ms>)
_CASH_DRAWER = lambda m, t1='', t2='': ESC + 'p' + m + chr(t1) + chr(t2)
CD_KICK_2 = _CASH_DRAWER('\x00', 50, 50) # Sends a pulse to pin 2 []
CD_KICK_5 = _CASH_DRAWER('\x01', 50, 50) # Sends a pulse to pin 5 []
# Paper
PAPER_FULL_CUT = '\x1d\x56\x00' # Full cut paper
PAPER_PART_CUT = '\x1d\x56\x01' # Partial cut paper
# Text format
TXT_NORMAL = '\x1b\x21\x00' # Normal text
TXT_2HEIGHT = '\x1b\x21\x10' # Double height text
TXT_2WIDTH = '\x1b\x21\x20' # Double width text
TXT_DOUBLE = '\x1b\x21\x30' # Double height & Width
TXT_UNDERL_OFF = '\x1b\x2d\x00' # Underline font OFF
TXT_UNDERL_ON = '\x1b\x2d\x01' # Underline font 1-dot ON
TXT_UNDERL2_ON = '\x1b\x2d\x02' # Underline font 2-dot ON
TXT_BOLD_OFF = '\x1b\x45\x00' # Bold font OFF
TXT_BOLD_ON = '\x1b\x45\x01' # Bold font ON
TXT_FONT_A = '\x1b\x4d\x00' # Font type A
TXT_FONT_B = '\x1b\x4d\x01' # Font type B
TXT_ALIGN_LT = '\x1b\x61\x00' # Left justification
TXT_ALIGN_CT = '\x1b\x61\x01' # Centering
TXT_ALIGN_RT = '\x1b\x61\x02' # Right justification
TXT_COLOR_BLACK = '\x1b\x72\x00' # Default Color
TXT_COLOR_RED = '\x1b\x72\x01' # Alternative Color ( Usually Red )
# Text Encoding
TXT_ENC_PC437 = '\x1b\x74\x00' # PC437 USA
TXT_ENC_KATAKANA= '\x1b\x74\x01' # KATAKANA (JAPAN)
TXT_ENC_PC850 = '\x1b\x74\x02' # PC850 Multilingual
TXT_ENC_PC860 = '\x1b\x74\x03' # PC860 Portuguese
TXT_ENC_PC863 = '\x1b\x74\x04' # PC863 Canadian-French
TXT_ENC_PC865 = '\x1b\x74\x05' # PC865 Nordic
TXT_ENC_KANJI6 = '\x1b\x74\x06' # One-pass Kanji, Hiragana
TXT_ENC_KANJI7 = '\x1b\x74\x07' # One-pass Kanji
TXT_ENC_KANJI8 = '\x1b\x74\x08' # One-pass Kanji
TXT_ENC_PC851 = '\x1b\x74\x0b' # PC851 Greek
TXT_ENC_PC853 = '\x1b\x74\x0c' # PC853 Turkish
TXT_ENC_PC857 = '\x1b\x74\x0d' # PC857 Turkish
TXT_ENC_PC737 = '\x1b\x74\x0e' # PC737 Greek
TXT_ENC_8859_7 = '\x1b\x74\x0f' # ISO8859-7 Greek
TXT_ENC_WPC1252 = '\x1b\x74\x10' # WPC1252
TXT_ENC_PC866 = '\x1b\x74\x11' # PC866 Cyrillic #2
TXT_ENC_PC852 = '\x1b\x74\x12' # PC852 Latin2
TXT_ENC_PC858 = '\x1b\x74\x13' # PC858 Euro
TXT_ENC_KU42 = '\x1b\x74\x14' # KU42 Thai
TXT_ENC_TIS11 = '\x1b\x74\x15' # TIS11 Thai
TXT_ENC_TIS18 = '\x1b\x74\x1a' # TIS18 Thai
TXT_ENC_TCVN3 = '\x1b\x74\x1e' # TCVN3 Vietnamese
TXT_ENC_TCVN3B = '\x1b\x74\x1f' # TCVN3 Vietnamese
TXT_ENC_PC720 = '\x1b\x74\x20' # PC720 Arabic
TXT_ENC_WPC775 = '\x1b\x74\x21' # WPC775 Baltic Rim
TXT_ENC_PC855 = '\x1b\x74\x22' # PC855 Cyrillic
TXT_ENC_PC861 = '\x1b\x74\x23' # PC861 Icelandic
TXT_ENC_PC862 = '\x1b\x74\x24' # PC862 Hebrew
TXT_ENC_PC864 = '\x1b\x74\x25' # PC864 Arabic
TXT_ENC_PC869 = '\x1b\x74\x26' # PC869 Greek
TXT_ENC_PC936 = '\x1C\x21\x00' # PC936 GBK(Guobiao Kuozhan)
TXT_ENC_8859_2 = '\x1b\x74\x27' # ISO8859-2 Latin2
TXT_ENC_8859_9 = '\x1b\x74\x28' # ISO8859-9 Latin5
TXT_ENC_PC1098 = '\x1b\x74\x29' # PC1098 Farsi
TXT_ENC_PC1118 = '\x1b\x74\x2a' # PC1118 Lithuanian
TXT_ENC_PC1119 = '\x1b\x74\x2b' # PC1119 Lithuanian
TXT_ENC_PC1125 = '\x1b\x74\x2c' # PC1125 Ukrainian
TXT_ENC_WPC1250 = '\x1b\x74\x2d' # WPC1250 Latin2
TXT_ENC_WPC1251 = '\x1b\x74\x2e' # WPC1251 Cyrillic
TXT_ENC_WPC1253 = '\x1b\x74\x2f' # WPC1253 Greek
TXT_ENC_WPC1254 = '\x1b\x74\x30' # WPC1254 Turkish
TXT_ENC_WPC1255 = '\x1b\x74\x31' # WPC1255 Hebrew
TXT_ENC_WPC1256 = '\x1b\x74\x32' # WPC1256 Arabic
TXT_ENC_WPC1257 = '\x1b\x74\x33' # WPC1257 Baltic Rim
TXT_ENC_WPC1258 = '\x1b\x74\x34' # WPC1258 Vietnamese
TXT_ENC_KZ1048 = '\x1b\x74\x35' # KZ-1048 Kazakhstan
TXT_ENC_KATAKANA_MAP = {
# Maps UTF-8 Katakana symbols to KATAKANA Page Codes
# Half-Width Katakanas
'\xef\xbd\xa1':'\xa1', # 。
'\xef\xbd\xa2':'\xa2', # 「
'\xef\xbd\xa3':'\xa3', # 」
'\xef\xbd\xa4':'\xa4', # 、
'\xef\xbd\xa5':'\xa5', # ・
'\xef\xbd\xa6':'\xa6', # ヲ
'\xef\xbd\xa7':'\xa7', # ァ
'\xef\xbd\xa8':'\xa8', # ィ
'\xef\xbd\xa9':'\xa9', # ゥ
'\xef\xbd\xaa':'\xaa', # ェ
'\xef\xbd\xab':'\xab', # ォ
'\xef\xbd\xac':'\xac', # ャ
'\xef\xbd\xad':'\xad', # ュ
'\xef\xbd\xae':'\xae', # ョ
'\xef\xbd\xaf':'\xaf', # ッ
'\xef\xbd\xb0':'\xb0', # ー
'\xef\xbd\xb1':'\xb1', # ア
'\xef\xbd\xb2':'\xb2', # イ
'\xef\xbd\xb3':'\xb3', # ウ
'\xef\xbd\xb4':'\xb4', # エ
'\xef\xbd\xb5':'\xb5', # オ
'\xef\xbd\xb6':'\xb6', # カ
'\xef\xbd\xb7':'\xb7', # キ
'\xef\xbd\xb8':'\xb8', # ク
'\xef\xbd\xb9':'\xb9', # ケ
'\xef\xbd\xba':'\xba', # コ
'\xef\xbd\xbb':'\xbb', # サ
'\xef\xbd\xbc':'\xbc', # シ
'\xef\xbd\xbd':'\xbd', # ス
'\xef\xbd\xbe':'\xbe', # セ
'\xef\xbd\xbf':'\xbf', # ソ
'\xef\xbe\x80':'\xc0', # タ
'\xef\xbe\x81':'\xc1', # チ
'\xef\xbe\x82':'\xc2', # ツ
'\xef\xbe\x83':'\xc3', # テ
'\xef\xbe\x84':'\xc4', # ト
'\xef\xbe\x85':'\xc5', # ナ
'\xef\xbe\x86':'\xc6', # ニ
'\xef\xbe\x87':'\xc7', # ヌ
'\xef\xbe\x88':'\xc8', # ネ
'\xef\xbe\x89':'\xc9', # ノ
'\xef\xbe\x8a':'\xca', # ハ
'\xef\xbe\x8b':'\xcb', # ヒ
'\xef\xbe\x8c':'\xcc', # フ
'\xef\xbe\x8d':'\xcd', # ヘ
'\xef\xbe\x8e':'\xce', # ホ
'\xef\xbe\x8f':'\xcf', # マ
'\xef\xbe\x90':'\xd0', # ミ
'\xef\xbe\x91':'\xd1', # ム
'\xef\xbe\x92':'\xd2', # メ
'\xef\xbe\x93':'\xd3', # モ
'\xef\xbe\x94':'\xd4', # ヤ
'\xef\xbe\x95':'\xd5', # ユ
'\xef\xbe\x96':'\xd6', # ヨ
'\xef\xbe\x97':'\xd7', # ラ
'\xef\xbe\x98':'\xd8', # リ
'\xef\xbe\x99':'\xd9', # ル
'\xef\xbe\x9a':'\xda', # レ
'\xef\xbe\x9b':'\xdb', # ロ
'\xef\xbe\x9c':'\xdc', # ワ
'\xef\xbe\x9d':'\xdd', # ン
'\xef\xbe\x9e':'\xde', # ゙
'\xef\xbe\x9f':'\xdf', # ゚
}
# Barcode format
BARCODE_TXT_OFF = '\x1d\x48\x00' # HRI barcode chars OFF
BARCODE_TXT_ABV = '\x1d\x48\x01' # HRI barcode chars above
BARCODE_TXT_BLW = '\x1d\x48\x02' # HRI barcode chars below
BARCODE_TXT_BTH = '\x1d\x48\x03' # HRI barcode chars both above and below
BARCODE_FONT_A = '\x1d\x66\x00' # Font type A for HRI barcode chars
BARCODE_FONT_B = '\x1d\x66\x01' # Font type B for HRI barcode chars
BARCODE_HEIGHT = '\x1d\x68\x64' # Barcode Height [1-255]
BARCODE_WIDTH = '\x1d\x77\x03' # Barcode Width [2-6]
BARCODE_UPC_A = '\x1d\x6b\x00' # Barcode type UPC-A
BARCODE_UPC_E = '\x1d\x6b\x01' # Barcode type UPC-E
BARCODE_EAN13 = '\x1d\x6b\x02' # Barcode type EAN13
BARCODE_EAN8 = '\x1d\x6b\x03' # Barcode type EAN8
BARCODE_CODE39 = '\x1d\x6b\x04' # Barcode type CODE39
BARCODE_ITF = '\x1d\x6b\x05' # Barcode type ITF
BARCODE_NW7 = '\x1d\x6b\x06' # Barcode type NW7
# Image format
S_RASTER_N = '\x1d\x76\x30\x00' # Set raster image normal size
S_RASTER_2W = '\x1d\x76\x30\x01' # Set raster image double width
S_RASTER_2H = '\x1d\x76\x30\x02' # Set raster image double height
S_RASTER_Q = '\x1d\x76\x30\x03' # Set raster image quadruple
| 39.578947 | 7,520 |
2,868 |
py
|
PYTHON
|
15.0
|
""" ESC/POS Exceptions classes """
class Error(Exception):
    """Base class for ESC/POS errors.

    ``resultcode`` carries the process exit code (default 1, overridable via
    the optional *status* argument).
    """

    def __init__(self, msg, status=None):
        super().__init__()
        self.msg = msg
        self.resultcode = status if status is not None else 1

    def __str__(self):
        return self.msg
# Result/Exit codes
# 0   = success
# 10  = No Barcode type defined
# 20  = Barcode size values are out of range
# 30  = Barcode text not supplied
# 40  = Image height is too large
# 50  = No string supplied to be printed
# 60  = Invalid pin to send Cash Drawer pulse
# 70  = Impossible to get status from the printer
# 80  = A part of the ticket was not printed
# 90  = No device found
# 100 = Error while handling the device
class BarcodeTypeError(Error):
    """Unknown barcode symbology requested (exit code 10)."""

    def __init__(self, msg=""):
        super().__init__(msg)
        self.resultcode = 10

    def __str__(self):
        return "No Barcode type is defined"
class BarcodeSizeError(Error):
    """Barcode width/height out of range (exit code 20)."""

    def __init__(self, msg=""):
        super().__init__(msg)
        self.resultcode = 20

    def __str__(self):
        return "Barcode size is out of range"
class BarcodeCodeError(Error):
    """No barcode payload was supplied (result code 30)."""

    def __init__(self, msg=""):
        super().__init__(msg)
        self.resultcode = 30

    def __str__(self):
        return "Code was not supplied"
class ImageSizeError(Error):
    """Image exceeds the printable height (result code 40)."""

    def __init__(self, msg=""):
        super().__init__(msg)
        self.resultcode = 40

    def __str__(self):
        return "Image height is longer than 255px and can't be printed"
class TextError(Error):
    """text() was called without a string to print (result code 50)."""

    def __init__(self, msg=""):
        super().__init__(msg)
        self.resultcode = 50

    def __str__(self):
        return "Text string must be supplied to the text() method"
class CashDrawerError(Error):
    """Invalid pin given for the cash drawer pulse (result code 60)."""

    def __init__(self, msg=""):
        super().__init__(msg)
        self.resultcode = 60

    def __str__(self):
        return "Valid pin must be set to send pulse"
class NoStatusError(Error):
    """The printer never answered a status query (result code 70)."""

    def __init__(self, msg=""):
        super().__init__(msg)
        self.resultcode = 70

    def __str__(self):
        return "Impossible to get status from the printer: " + str(self.msg)
class TicketNotPrinted(Error):
    """Raised when a write to the printer came up short, i.e. part of the
    ticket never reached the device (result code 80).
    """

    def __init__(self, msg=""):
        super().__init__(msg)
        self.resultcode = 80

    def __str__(self):
        # Message grammar fixed: was "A part of the ticket was not been printed".
        return "A part of the ticket has not been printed: " + str(self.msg)
class NoDeviceError(Error):
    """No matching printer device was found (result code 90)."""

    def __init__(self, msg=""):
        super().__init__(msg)
        self.resultcode = 90

    def __str__(self):
        return str(self.msg)
class HandleDeviceError(Error):
    """The device was found but could not be claimed/configured
    (result code 100)."""

    def __init__(self, msg=""):
        super().__init__(msg)
        self.resultcode = 100

    def __str__(self):
        return str(self.msg)
| 24.93913 | 2,868 |
8,283 |
py
|
PYTHON
|
15.0
|
#!/usr/bin/python
from __future__ import print_function
import serial
import socket
import usb.core
import usb.util
from .escpos import *
from .constants import *
from .exceptions import *
from time import sleep
class Usb(Escpos):
    """ESC/POS printer driven directly over USB (via pyusb)."""

    def __init__(self, idVendor, idProduct, interface=0, in_ep=None, out_ep=None):
        """
        @param idVendor  : Vendor ID
        @param idProduct : Product ID
        @param interface : USB device interface
        @param in_ep     : Input end point (auto-detected in open() when None)
        @param out_ep    : Output end point (auto-detected in open() when None)
        """
        # Banner pushed to the printer when a print job fails half-way,
        # followed by a full paper cut.
        self.errorText = "ERROR PRINTER\n\n\n\n\n\n"+PAPER_FULL_CUT

        self.idVendor = idVendor
        self.idProduct = idProduct
        self.interface = interface
        self.in_ep = in_ep
        self.out_ep = out_ep

        # pyusb dropped the 'interface' parameter from usb.Device.write() at 1.0.0b2
        # https://github.com/pyusb/pyusb/commit/20cd8c1f79b24082ec999c022b56c3febedc0964#diff-b5a4f98a864952f0f55d569dd14695b7L293
        if usb.version_info < (1, 0, 0) or (usb.version_info == (1, 0, 0) and usb.version_info[3] in ("a1", "a2", "a3", "b1")):
            self.write_kwargs = dict(interface=self.interface)
        else:
            self.write_kwargs = {}

        self.open()

    def open(self):
        """ Search device on USB tree and set is as escpos device

        Raises NoDeviceError when the vendor/product pair is absent, and
        HandleDeviceError when the device exists but cannot be claimed.
        """
        self.device = usb.core.find(idVendor=self.idVendor, idProduct=self.idProduct)
        if self.device is None:
            raise NoDeviceError()
        try:
            # Take the device over from the OS kernel driver, then claim it.
            if self.device.is_kernel_driver_active(self.interface):
                self.device.detach_kernel_driver(self.interface)
            self.device.set_configuration()
            usb.util.claim_interface(self.device, self.interface)

            cfg = self.device.get_active_configuration()
            intf = cfg[(0,0)] # first interface
            if self.in_ep is None:
                # Attempt to detect IN/OUT endpoint addresses
                try:
                    is_IN = lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_IN
                    is_OUT = lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_OUT
                    endpoint_in = usb.util.find_descriptor(intf, custom_match=is_IN)
                    endpoint_out = usb.util.find_descriptor(intf, custom_match=is_OUT)
                    self.in_ep = endpoint_in.bEndpointAddress
                    self.out_ep = endpoint_out.bEndpointAddress
                except usb.core.USBError:
                    # default values for officially supported printers
                    self.in_ep = 0x82
                    self.out_ep = 0x01
        except usb.core.USBError as e:
            raise HandleDeviceError(e)

    def close(self):
        """Give the USB interface back to the kernel.

        Retries on USBError up to 10 times (0.1s apart); returns True on
        success, False after exhausting the retries.
        """
        i = 0
        while True:
            try:
                if not self.device.is_kernel_driver_active(self.interface):
                    usb.util.release_interface(self.device, self.interface)
                    self.device.attach_kernel_driver(self.interface)
                    usb.util.dispose_resources(self.device)
                else:
                    self.device = None
                return True
            except usb.core.USBError as e:
                i += 1
                if i > 10:
                    return False
                sleep(0.1)

    def _raw(self, msg):
        """ Print any command sent in raw format """
        # A short write means the ticket was truncated: push the error
        # banner to the printer and report the failure to the caller.
        if len(msg) != self.device.write(self.out_ep, msg, timeout=5000, **self.write_kwargs):
            self.device.write(self.out_ep, self.errorText, **self.write_kwargs)
            raise TicketNotPrinted()

    def __extract_status(self):
        # Poll the IN endpoint until a byte shows up; the last byte of the
        # last non-empty read wins. Gives up (NoStatusError) after 10000
        # empty reads.
        maxiterate = 0
        rep = None
        while rep == None:
            maxiterate += 1
            if maxiterate > 10000:
                raise NoStatusError()
            r = self.device.read(self.in_ep, 20, self.interface).tolist()
            while len(r):
                rep = r.pop()
        return rep

    def get_printer_status(self):
        """Query the printer with the four DLE EOT real-time requests and
        decode each returned status byte into boolean flags.

        Returns a dict with 'printer', 'offline', 'error' and 'paper'
        sub-dicts; each holds the raw 'status_code' plus decoded bits.
        """
        status = {
            'printer': {},
            'offline': {},
            'error' : {},
            'paper' : {},
        }

        self.device.write(self.out_ep, DLE_EOT_PRINTER, **self.write_kwargs)
        printer = self.__extract_status()
        self.device.write(self.out_ep, DLE_EOT_OFFLINE, **self.write_kwargs)
        offline = self.__extract_status()
        self.device.write(self.out_ep, DLE_EOT_ERROR, **self.write_kwargs)
        error = self.__extract_status()
        self.device.write(self.out_ep, DLE_EOT_PAPER, **self.write_kwargs)
        paper = self.__extract_status()

        # A well-formed status byte satisfies (byte & 147) == 18; anything
        # else is flagged as status_error.
        status['printer']['status_code'] = printer
        status['printer']['status_error'] = not ((printer & 147) == 18)
        status['printer']['online'] = not bool(printer & 8)
        status['printer']['recovery'] = bool(printer & 32)
        status['printer']['paper_feed_on'] = bool(printer & 64)
        status['printer']['drawer_pin_high'] = bool(printer & 4)
        status['offline']['status_code'] = offline
        status['offline']['status_error'] = not ((offline & 147) == 18)
        status['offline']['cover_open'] = bool(offline & 4)
        status['offline']['paper_feed_on'] = bool(offline & 8)
        status['offline']['paper'] = not bool(offline & 32)
        status['offline']['error'] = bool(offline & 64)
        status['error']['status_code'] = error
        status['error']['status_error'] = not ((error & 147) == 18)
        status['error']['recoverable'] = bool(error & 4)
        status['error']['autocutter'] = bool(error & 8)
        status['error']['unrecoverable'] = bool(error & 32)
        status['error']['auto_recoverable'] = not bool(error & 64)
        status['paper']['status_code'] = paper
        status['paper']['status_error'] = not ((paper & 147) == 18)
        status['paper']['near_end'] = bool(paper & 12)
        status['paper']['present'] = not bool(paper & 96)

        return status

    def __del__(self):
        """ Release USB interface """
        if self.device:
            self.close()
        self.device = None
class Serial(Escpos):
    """ESC/POS printer attached to a serial (RS-232) port."""

    def __init__(self, devfile="/dev/ttyS0", baudrate=9600, bytesize=8, timeout=1):
        """
        @param devfile  : Device file under dev filesystem
        @param baudrate : Baud rate for serial transmission
        @param bytesize : Serial buffer size
        @param timeout  : Read/Write timeout
        """
        self.devfile = devfile
        self.baudrate = baudrate
        self.bytesize = bytesize
        self.timeout = timeout
        self.open()

    def open(self):
        """Open the serial port and use it as the escpos device."""
        self.device = serial.Serial(
            port=self.devfile,
            baudrate=self.baudrate,
            bytesize=self.bytesize,
            parity=serial.PARITY_NONE,
            stopbits=serial.STOPBITS_ONE,
            timeout=self.timeout,
            dsrdtr=True,
        )
        if self.device is None:
            print("Unable to open serial printer on: %s" % self.devfile)
        else:
            print("Serial printer enabled")

    def _raw(self, msg):
        """Write a raw command string straight to the port."""
        self.device.write(msg)

    def __del__(self):
        """Close the serial port when the object is collected."""
        if self.device is not None:
            self.device.close()
class Network(Escpos):
    """ESC/POS printer reached over a raw TCP socket."""

    def __init__(self,host,port=9100):
        """
        @param host : Printer's hostname or IP address
        @param port : Port to write to (default 9100)
        """
        self.host = host
        self.port = port
        self.open()

    def open(self):
        """ Open TCP socket and set it as escpos device """
        self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.device.connect((self.host, self.port))

        # NOTE(review): dead check -- socket.socket() never returns None and
        # a failed connect() raises before this line is reached, so the
        # message below can never be printed.
        if self.device is None:
            print("Could not open socket for %s" % self.host)

    def _raw(self, msg):
        # Send any command in raw format over the open TCP connection.
        self.device.send(msg)

    def __del__(self):
        """ Close TCP connection """
        self.device.close()
| 36.328947 | 8,283 |
13,135 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from __future__ import print_function
import logging
import math
import os
import os.path
import subprocess
import time
import netifaces as ni
import traceback
try:
from .. escpos import *
from .. escpos.exceptions import *
from .. escpos.printer import Usb
except ImportError:
escpos = printer = None
from queue import Queue
from threading import Thread, Lock
try:
import usb.core
except ImportError:
usb = None
from odoo import http, _
from odoo.addons.hw_drivers.controllers import proxy
_logger = logging.getLogger(__name__)
# workaround https://bugs.launchpad.net/openobject-server/+bug/947231
# related to http://bugs.python.org/issue7980
# Call strptime once from the main thread at import time: its first use is
# not thread-safe (see linked CPython issue7980), so priming it here avoids
# failures when worker threads call it concurrently later.
from datetime import datetime
datetime.strptime('2012-01-01', '%Y-%m-%d')
class EscposDriver(Thread):
    """Background thread that owns the ESC/POS USB printer.

    HTTP controllers push (timestamp, task, data) tuples on ``self.queue``;
    run() consumes them one by one, (re)discovering the printer for every
    task so hot-plugging is handled.
    """

    def __init__(self):
        Thread.__init__(self)
        self.queue = Queue()  # pending (timestamp, task, data) tuples
        self.lock = Lock()    # serializes thread startup in lockedstart()
        self.status = {'status':'connecting', 'messages':[]}

    def connected_usb_devices(self):
        """Return the plugged-in USB printers as a list of dicts with
        'vendor', 'product' (USB ids) and a human readable 'name'."""
        connected = []

        # printers can either define bDeviceClass=7, or they can define one of
        # their interfaces with bInterfaceClass=7. This class checks for both.
        class FindUsbClass(object):
            def __init__(self, usb_class):
                self._class = usb_class
            def __call__(self, device):
                # first, let's check the device
                if device.bDeviceClass == self._class:
                    return True
                # transverse all devices and look through their interfaces to
                # find a matching class
                for cfg in device:
                    intf = usb.util.find_descriptor(cfg, bInterfaceClass=self._class)
                    if intf is not None:
                        return True
                return False

        printers = usb.core.find(find_all=True, custom_match=FindUsbClass(7))

        # if no printers are found after this step we will take the
        # first epson or star device we can find.
        # epson
        if not printers:
            printers = usb.core.find(find_all=True, idVendor=0x04b8)
        # star
        if not printers:
            printers = usb.core.find(find_all=True, idVendor=0x0519)

        for printer in printers:
            try:
                description = usb.util.get_string(printer, printer.iManufacturer) + " " + usb.util.get_string(printer, printer.iProduct)
            except Exception as e:
                _logger.error("Can not get printer description: %s" % e)
                description = 'Unknown printer'
            connected.append({
                'vendor': printer.idVendor,
                'product': printer.idProduct,
                'name': description
            })

        return connected

    def lockedstart(self):
        """Start the worker thread exactly once; safe to call repeatedly."""
        with self.lock:
            if not self.is_alive():
                self.daemon = True
                self.start()

    def get_escpos_printer(self):
        """Open the first connected printer found, updating self.status.

        Returns an open Usb device or None when nothing usable is found.
        """
        printers = self.connected_usb_devices()
        if len(printers) > 0:
            try:
                print_dev = Usb(printers[0]['vendor'], printers[0]['product'])
            except HandleDeviceError:
                # Escpos printers are now integrated to PrinterDriver, if the IoTBox is printing
                # through Cups at the same time, we get an USBError(16, 'Resource busy'). This means
                # that the Odoo instance connected to this IoTBox is up to date and no longer uses
                # this escpos library.
                return None
            self.set_status(
                'connected',
                "Connected to %s (in=0x%02x,out=0x%02x)" % (printers[0]['name'], print_dev.in_ep, print_dev.out_ep)
            )
            return print_dev
        else:
            self.set_status('disconnected','Printer Not Found')
            return None

    def get_status(self):
        """Queue a connectivity probe and return the last known status."""
        self.push_task('status')
        return self.status

    def open_cashbox(self,printer):
        """Pulse both cash drawer pins (2 and 5) so either wiring works."""
        printer.cashdraw(2)
        printer.cashdraw(5)

    def set_status(self, status, message = None):
        """Record the driver status and append/replace its message list.

        Repeated statuses only accumulate distinct consecutive messages;
        a status change resets the list.
        """
        _logger.info(status+' : '+ (message or 'no message'))
        if status == self.status['status']:
            if message != None and (len(self.status['messages']) == 0 or message != self.status['messages'][-1]):
                self.status['messages'].append(message)
        else:
            self.status['status'] = status
            if message:
                self.status['messages'] = [message]
            else:
                self.status['messages'] = []

        if status == 'error' and message:
            _logger.error('ESC/POS Error: %s', message)
        elif status == 'disconnected' and message:
            _logger.warning('ESC/POS Device Disconnected: %s', message)

    def run(self):
        """Main worker loop: pop tasks and execute them on the printer.

        Stale tasks are dropped (receipts after 1 hour, cashbox pulses
        after 12 seconds). On failure the task is re-queued (``error``
        flag) and the printer handle is closed so the next iteration
        re-opens it.
        """
        printer = None
        if not escpos:
            _logger.error('ESC/POS cannot initialize, please verify system dependencies.')
            return
        while True:
            try:
                error = True
                timestamp, task, data = self.queue.get(True)

                printer = self.get_escpos_printer()

                if printer == None:
                    # No printer right now: keep the task (except pure
                    # status probes) and retry in 5 seconds.
                    if task != 'status':
                        self.queue.put((timestamp,task,data))
                    error = False
                    time.sleep(5)
                    continue
                elif task == 'receipt':
                    if timestamp >= time.time() - 1 * 60 * 60:
                        self.print_receipt_body(printer,data)
                        printer.cut()
                elif task == 'xml_receipt':
                    if timestamp >= time.time() - 1 * 60 * 60:
                        printer.receipt(data)
                elif task == 'cashbox':
                    if timestamp >= time.time() - 12:
                        self.open_cashbox(printer)
                elif task == 'status':
                    pass
                error = False

            except NoDeviceError as e:
                print("No device found %s" % e)
            except HandleDeviceError as e:
                printer = None
                print("Impossible to handle the device due to previous error %s" % e)
            except TicketNotPrinted as e:
                print("The ticket does not seems to have been fully printed %s" % e)
            except NoStatusError as e:
                print("Impossible to get the status of the printer %s" % e)
            except Exception as e:
                self.set_status('error')
                _logger.exception(e)
            finally:
                if error:
                    self.queue.put((timestamp, task, data))
                if printer:
                    printer.close()
                    printer = None

    def push_task(self,task, data = None):
        """Enqueue a task (starting the worker thread if needed)."""
        self.lockedstart()
        self.queue.put((time.time(),task,data))

    def print_receipt_body(self,eprint,receipt):
        """Render the ``receipt`` dict on the printer ``eprint``.

        The receipt is the JSON structure sent by the Point of Sale:
        company info, orderlines, paymentlines, tax details and footer.
        """

        def check(string):
            # A field set to True is a placeholder for "absent"; also
            # reject empty / whitespace-only values.
            return string != True and bool(string) and string.strip()

        def price(amount):
            return ("{0:."+str(receipt['precision']['price'])+"f}").format(amount)

        def money(amount):
            return ("{0:."+str(receipt['precision']['money'])+"f}").format(amount)

        def quantity(amount):
            # Only show decimals for non-integral quantities.
            if math.floor(amount) != amount:
                return ("{0:."+str(receipt['precision']['quantity'])+"f}").format(amount)
            else:
                return str(amount)

        def printline(left, right='', width=40, ratio=0.5, indent=0):
            # Lay out a two-column line: left text padded/truncated to
            # ratio*width (minus indent), right text right-aligned.
            lwidth = int(width * ratio)
            rwidth = width - lwidth
            lwidth = lwidth - indent

            left = left[:lwidth]
            if len(left) != lwidth:
                left = left + ' ' * (lwidth - len(left))

            right = right[-rwidth:]
            if len(right) != rwidth:
                right = ' ' * (rwidth - len(right)) + right

            return ' ' * indent + left + right + '\n'

        def print_taxes():
            taxes = receipt['tax_details']
            for tax in taxes:
                eprint.text(printline(tax['tax']['name'],price(tax['amount']), width=40,ratio=0.6))

        # Receipt Header
        if receipt['company']['logo']:
            eprint.set(align='center')
            eprint.print_base64_image(receipt['company']['logo'])
            eprint.text('\n')
        else:
            eprint.set(align='center',type='b',height=2,width=2)
            eprint.text(receipt['company']['name'] + '\n')

        eprint.set(align='center',type='b')
        if check(receipt['company']['contact_address']):
            eprint.text(receipt['company']['contact_address'] + '\n')
        if check(receipt['company']['phone']):
            eprint.text('Tel:' + receipt['company']['phone'] + '\n')
        if check(receipt['company']['vat']):
            eprint.text('VAT:' + receipt['company']['vat'] + '\n')
        if check(receipt['company']['email']):
            eprint.text(receipt['company']['email'] + '\n')
        if check(receipt['company']['website']):
            eprint.text(receipt['company']['website'] + '\n')
        if check(receipt['header']):
            eprint.text(receipt['header']+'\n')
        if check(receipt['cashier']):
            eprint.text('-'*32+'\n')
            eprint.text('Served by '+receipt['cashier']+'\n')

        # Orderlines
        eprint.text('\n\n')
        eprint.set(align='center')
        for line in receipt['orderlines']:
            pricestr = price(line['price_display'])
            # Single undiscounted unit: one compact line; otherwise the
            # product name followed by discount/quantity detail lines.
            if line['discount'] == 0 and line['unit_name'] == 'Units' and line['quantity'] == 1:
                eprint.text(printline(line['product_name'],pricestr,ratio=0.6))
            else:
                eprint.text(printline(line['product_name'],ratio=0.6))
                if line['discount'] != 0:
                    eprint.text(printline('Discount: '+str(line['discount'])+'%', ratio=0.6, indent=2))
                if line['unit_name'] == 'Units':
                    eprint.text( printline( quantity(line['quantity']) + ' x ' + price(line['price']), pricestr, ratio=0.6, indent=2))
                else:
                    eprint.text( printline( quantity(line['quantity']) + line['unit_name'] + ' x ' + price(line['price']), pricestr, ratio=0.6, indent=2))

        # Subtotal if the taxes are not included
        taxincluded = True
        if money(receipt['subtotal']) != money(receipt['total_with_tax']):
            eprint.text(printline('','-------'));
            eprint.text(printline(_('Subtotal'),money(receipt['subtotal']),width=40, ratio=0.6))
            print_taxes()
            #eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))
            taxincluded = False

        # Total
        eprint.text(printline('','-------'));
        eprint.set(align='center',height=2)
        eprint.text(printline(_(' TOTAL'),money(receipt['total_with_tax']),width=40, ratio=0.6))
        eprint.text('\n\n');

        # Paymentlines
        eprint.set(align='center')
        for line in receipt['paymentlines']:
            eprint.text(printline(line['journal'], money(line['amount']), ratio=0.6))

        eprint.text('\n');
        eprint.set(align='center',height=2)
        eprint.text(printline(_(' CHANGE'),money(receipt['change']),width=40, ratio=0.6))
        eprint.set(align='center')
        eprint.text('\n');

        # Extra Payment info
        if receipt['total_discount'] != 0:
            eprint.text(printline(_('Discounts'),money(receipt['total_discount']),width=40, ratio=0.6))
        if taxincluded:
            print_taxes()
            #eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))

        # Footer
        if check(receipt['footer']):
            eprint.text('\n'+receipt['footer']+'\n\n')
        eprint.text(receipt['name']+'\n')
        # receipt['date']['month'] is 0-based (JavaScript Date), hence the +1.
        eprint.text( str(receipt['date']['date']).zfill(2)
                    +'/'+ str(receipt['date']['month']+1).zfill(2)
                    +'/'+ str(receipt['date']['year']).zfill(4)
                    +' '+ str(receipt['date']['hour']).zfill(2)
                    +':'+ str(receipt['date']['minute']).zfill(2) )
# Single module-level driver instance; registering it in proxy_drivers lets
# the hw_proxy status page report on the 'escpos' device.
driver = EscposDriver()
proxy.proxy_drivers['escpos'] = driver
class EscposProxy(proxy.ProxyController):
    """JSON endpoints the Point of Sale client calls for printing.

    Each route only enqueues a task for the EscposDriver worker thread
    and returns immediately; the actual device I/O happens asynchronously.
    """

    @http.route('/hw_proxy/open_cashbox', type='json', auth='none', cors='*')
    def open_cashbox(self):
        # Pulse the cash drawer (dropped by the driver if older than 12s).
        _logger.info('ESC/POS: OPEN CASHBOX')
        driver.push_task('cashbox')

    @http.route('/hw_proxy/print_receipt', type='json', auth='none', cors='*')
    def print_receipt(self, receipt):
        # Print a JSON-structured receipt (see print_receipt_body).
        _logger.info('ESC/POS: PRINT RECEIPT')
        driver.push_task('receipt',receipt)

    @http.route('/hw_proxy/print_xml_receipt', type='json', auth='none', cors='*')
    def print_xml_receipt(self, receipt):
        # Print a pre-rendered XML receipt via the printer's receipt().
        _logger.info('ESC/POS: PRINT XML RECEIPT')
        driver.push_task('xml_receipt',receipt)
| 38.18314 | 13,135 |
2,212 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
{
'name': 'Website Live Chat',
'category': 'Hidden',
'summary': 'Chat with your website visitors',
'version': '1.0',
'description': """
Allow website visitors to chat with the collaborators. This module also brings a feedback tool for the livechat and web pages to display your channel with its ratings on the website.
""",
'depends': ['website', 'im_livechat'],
'installable': True,
'application': False,
'auto_install': True,
'data': [
'views/website_livechat.xml',
'views/res_config_settings_views.xml',
'views/website_livechat_view.xml',
'views/website_visitor_views.xml',
'security/ir.model.access.csv',
'security/website_livechat.xml',
'data/website_livechat_data.xml',
],
'assets': {
'mail.assets_discuss_public': [
'website_livechat/static/src/components/*/*',
'website_livechat/static/src/models/*/*.js',
],
'web.assets_frontend': [
'mail/static/src/js/utils.js',
'im_livechat/static/src/legacy/public_livechat.js',
'website_livechat/static/src/legacy/public_livechat.js',
'im_livechat/static/src/legacy/public_livechat.scss',
'website_livechat/static/src/legacy/public_livechat.scss',
],
'website.assets_editor': [
'website_livechat/static/src/js/**/*',
],
'web.assets_backend': [
'website_livechat/static/src/components/*/*.js',
'website_livechat/static/src/components/*/*.scss',
'website_livechat/static/src/models/*/*.js',
],
'web.assets_tests': [
'website_livechat/static/tests/tours/**/*',
],
'web.qunit_suite_tests': [
'website_livechat/static/src/components/*/tests/*.js',
'website_livechat/static/src/models/*/tests/*.js',
'website_livechat/static/tests/helpers/mock_models.js',
'website_livechat/static/tests/helpers/mock_server.js',
],
'web.assets_qweb': [
'website_livechat/static/src/components/*/*.xml',
],
},
'license': 'LGPL-3',
}
| 38.807018 | 2,212 |
4,209 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import tests
from odoo.addons.website_livechat.tests.common import TestLivechatCommon
@tests.tagged('post_install', '-at_install')
class TestLivechatRequestHttpCase(tests.HttpCase, TestLivechatCommon):
    """Operator-initiated chat requests: full flow and visitor override."""

    def test_livechat_request_complete_flow(self):
        """Two consecutive chat requests, rated good then bad by the visitor."""
        self._clean_livechat_sessions()

        # Send first chat request - Open chat from operator side
        channel_1 = self._common_chat_request_flow()
        # Visitor Rates the conversation (Good)
        self._send_rating(channel_1, self.visitor, 5)

        # Operator Re-Send a chat request
        channel_2 = self._common_chat_request_flow()
        # Visitor Rates the conversation (Bad)
        self._send_rating(channel_2, self.visitor, 1, "Stop bothering me! I hate you </3 !")

    def test_cancel_chat_request_on_visitor_demand(self):
        """A pending chat request is cancelled when the visitor opens a
        livechat session themselves; the visitor's session wins."""
        self._clean_livechat_sessions()
        self.operator_b = self.env['res.users'].create({
            'name': 'Operator Marc',
            'login': 'operator_b',
            'email': '[email protected]',
            'password': "operatormarc",
            'livechat_username': "Marco'r El",
        })
        # Open Chat Request
        self.visitor.with_user(self.operator_b).action_send_chat_request()
        chat_request = self.env['mail.channel'].search([('livechat_visitor_id', '=', self.visitor.id), ('livechat_active', '=', True)])
        self.assertEqual(chat_request.livechat_operator_id, self.operator_b.partner_id, "Operator for active livechat session must be Operator Marc")

        # Click on livechatbutton at client side
        res = self.opener.post(url=self.open_chat_url, json=self.open_chat_params)
        self.assertEqual(res.status_code, 200)
        channel = self.env['mail.channel'].search([('livechat_visitor_id', '=', self.visitor.id),
                                                   ('livechat_active', '=', True)])
        # Check that the chat request has been canceled.
        chat_request.invalidate_cache()
        self.assertEqual(chat_request.livechat_active, False, "The livechat request must be inactive as the visitor started himself a livechat session.")
        self.assertEqual(len(channel), 1)
        self.assertEqual(channel.livechat_operator_id, self.operator.partner_id, "Operator for active livechat session must be Michel Operator")

    def _common_chat_request_flow(self):
        """Send a chat request, exchange one message each way, then have the
        visitor leave. Returns the mail.channel of the session."""
        self.visitor.with_user(self.operator).action_send_chat_request()
        channel = self.env['mail.channel'].search([('livechat_visitor_id', '=', self.visitor.id), ('livechat_active', '=', True)])
        self.assertEqual(len(channel), 1)
        self.assertEqual(channel.livechat_operator_id, self.operator.partner_id, "Michel Operator should be the operator of this channel.")
        self.assertEqual(len(channel.message_ids), 0)

        # Operator Sends message
        self._send_message(channel, self.operator.email, "Hello Again !", author_id=self.operator.partner_id.id)
        self.assertEqual(len(channel.message_ids), 1)

        # Visitor Answers
        self._send_message(channel, self.visitor.display_name, "Answer from Visitor")
        self.assertEqual(len(channel.message_ids), 2)

        # Visitor Leave the conversation
        channel._close_livechat_session()
        self.assertEqual(len(channel.message_ids), 3)
        self.assertEqual(channel.message_ids[0].author_id, self.env.ref('base.partner_root'), "Odoobot must be the sender of the 'has left the conversation' message.")
        self.assertEqual(channel.message_ids[0].body, "<p>%s has left the conversation.</p>" % self.visitor.display_name)
        self.assertEqual(channel.livechat_active, False, "The livechat session must be inactive as the visitor sent his feedback.")

        return channel

    def _clean_livechat_sessions(self):
        # clean every possible mail channel linked to the visitor
        active_channels = self.env['mail.channel'].search([('livechat_visitor_id', '=', self.visitor.id), ('livechat_active', '=', True)])
        for active_channel in active_channels:
            active_channel._close_livechat_session()
| 53.278481 | 4,209 |
7,113 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
from odoo import tests, _
from odoo.addons.website_livechat.tests.common import TestLivechatCommon
@tests.tagged('post_install', '-at_install')
class TestLivechatBasicFlowHttpCase(tests.HttpCase, TestLivechatCommon):
    """Visitor-initiated livechat flows: history, naming, ratings, ACLs."""

    def test_visitor_banner_history(self):
        """The visitor banner shows the page history, oldest first."""
        # create visitor history
        self.env['website.track'].create([{
            'page_id': self.env.ref('website.homepage_page').id,
            'visitor_id': self.visitor.id,
            'visit_datetime': self.base_datetime,
        }, {
            'page_id': self.env.ref('website.contactus_page').id,
            'visitor_id': self.visitor.id,
            'visit_datetime': self.base_datetime - datetime.timedelta(minutes=10),
        }, {
            'page_id': self.env.ref('website.homepage_page').id,
            'visitor_id': self.visitor.id,
            'visit_datetime': self.base_datetime - datetime.timedelta(minutes=20),
        }])

        handmade_history = "%s (21:10) → %s (21:20) → %s (21:30)" % (
            self.env.ref('website.homepage_page').name,
            self.env.ref('website.contactus_page').name,
            self.env.ref('website.homepage_page').name,
        )
        history = self.env['mail.channel']._get_visitor_history(self.visitor)

        self.assertEqual(history, handmade_history)

    def test_livechat_username(self):
        """The channel is named after the operator's livechat_username when
        set, falling back to the user's regular name otherwise."""
        # Open a new live chat
        res = self.opener.post(url=self.open_chat_url, json=self.open_chat_params)
        self.assertEqual(res.status_code, 200)

        channel_1 = self.env['mail.channel'].search([('livechat_visitor_id', '=', self.visitor.id), ('livechat_active', '=', True)], limit=1)

        # Check Channel naming
        self.assertEqual(channel_1.name, "%s %s" % (self.visitor.display_name, self.operator.livechat_username))
        channel_1.unlink()

        # Remove livechat_username
        self.operator.livechat_username = False

        # This fixes an issue in the controller, possibly related to the testing
        # environment. The business code unexpectedly uses two cache objects
        # (env.cache), which triggers cache misses: a field is computed with its
        # value stored into one cache and retrieved from another cache :-/
        self.operator.name

        # Open a new live chat
        res = self.opener.post(url=self.open_chat_url, json=self.open_chat_params)
        self.assertEqual(res.status_code, 200)

        channel_2 = self.env['mail.channel'].search([('livechat_visitor_id', '=', self.visitor.id), ('livechat_active', '=', True)], limit=1)

        # Check Channel naming
        self.assertEqual(channel_2.name, "%s %s" % (self.visitor.display_name, self.operator.name))

    def test_basic_flow_with_rating(self):
        """After a rating, closing the session posts Odoobot's leave message."""
        channel = self._common_basic_flow()

        self._send_rating(channel, self.visitor, 5, "This deboulonnage was fine but not topitop.")

        channel._close_livechat_session()
        self.assertEqual(len(channel.message_ids), 4)
        self.assertEqual(channel.message_ids[0].author_id, self.env.ref('base.partner_root'), "Odoobot must be the sender of the 'has left the conversation' message.")
        self.assertEqual(channel.message_ids[0].body, "<p>%s has left the conversation.</p>" % self.visitor.display_name)
        self.assertEqual(channel.livechat_active, False, "The livechat session must be inactive as the visitor sent his feedback.")

    def test_basic_flow_without_rating(self):
        """Closing without a rating also deactivates the session."""
        channel = self._common_basic_flow()

        # has left the conversation
        channel._close_livechat_session()
        self.assertEqual(len(channel.message_ids), 3)
        self.assertEqual(channel.message_ids[0].author_id, self.env.ref('base.partner_root'), "Odoobot must be the author the message.")
        self.assertEqual(channel.message_ids[0].body, "<p>%s has left the conversation.</p>" % self.visitor.display_name)
        self.assertEqual(channel.livechat_active, False, "The livechat session must be inactive since visitor has left the conversation.")

    def test_visitor_info_access_rights(self):
        """Visitor info in channel data is only exposed to livechat users."""
        channel = self._common_basic_flow()
        self.authenticate(self.operator.login, 'ideboulonate')

        # Retrieve channels information, visitor info should be there
        res = self.opener.post(self.message_info_url, json={})
        self.assertEqual(res.status_code, 200)
        messages_info = res.json().get('result', {})
        livechat_info = next(c for c in messages_info['channels'] if c['id'] == channel.id)
        self.assertIn('visitor', livechat_info)

        # Remove access to visitors and try again, visitors info shouldn't be included
        self.operator.groups_id -= self.group_livechat_user
        res = self.opener.post(self.message_info_url, json={})
        self.assertEqual(res.status_code, 200)
        messages_info = res.json().get('result', {})
        livechat_info = next(c for c in messages_info['channels'] if c['id'] == channel.id)
        self.assertNotIn('visitor', livechat_info)

    def _common_basic_flow(self):
        """Open a session from the visitor side and exchange one message in
        each direction. Returns the active mail.channel."""
        # Open a new live chat
        res = self.opener.post(url=self.open_chat_url, json=self.open_chat_params)
        self.assertEqual(res.status_code, 200)

        channel = self.env['mail.channel'].search([('livechat_visitor_id', '=', self.visitor.id), ('livechat_active', '=', True)], limit=1)

        # Check Channel and Visitor naming
        self.assertEqual(self.visitor.display_name, "%s #%s" % (_("Website Visitor"), self.visitor.id))
        self.assertEqual(channel.name, "%s %s" % (self.visitor.display_name, self.operator.livechat_username))

        # Post Message from visitor
        self._send_message(channel, self.visitor.display_name, "Message from Visitor")
        self.assertEqual(len(channel.message_ids), 1)
        self.assertEqual(channel.message_ids[0].author_id.id, False, "The author of the message is not a partner.")
        self.assertEqual(channel.message_ids[0].email_from, self.visitor.display_name, "The sender's email should be the visitor's email.")
        self.assertEqual(channel.message_ids[0].body, "<p>Message from Visitor</p>")
        self.assertEqual(channel.livechat_active, True, "The livechat session must be active as the visitor did not left the conversation yet.")

        # Post message from operator
        self._send_message(channel, self.operator.email, "Message from Operator", author_id=self.operator.partner_id.id)
        self.assertEqual(len(channel.message_ids), 2)
        self.assertEqual(channel.message_ids[0].author_id, self.operator.partner_id, "The author of the message should be the operator.")
        self.assertEqual(channel.message_ids[0].email_from, self.operator.email, "The sender's email should be the operator's email.")
        self.assertEqual(channel.message_ids[0].body, "<p>Message from Operator</p>")
        self.assertEqual(channel.livechat_active, True, "The livechat session must be active as the visitor did not left the conversation yet.")

        return channel
| 53.052239 | 7,109 |
4,949 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import tests, _
from odoo.addons.website_livechat.tests.common import TestLivechatCommon
@tests.tagged('post_install', '-at_install')
class TestLivechatUI(tests.HttpCase, TestLivechatCommon):
    """Browser-tour tests for the website livechat widget.

    Each test runs a JS tour against "/" and then verifies, server side, the
    state of the mail.channel record(s) created for ``self.visitor_tour``.
    """

    def setUp(self):
        super(TestLivechatUI, self).setUp()
        # Dedicated visitor for the tours; TestLivechatCommon.setUp patched
        # _get_visitor_from_request to return ``self.target_visitor``.
        self.visitor_tour = self.env['website.visitor'].create({
            'name': 'Visitor Tour',
            'website_id': self.env.ref('website.default_website').id,
        })
        self.target_visitor = self.visitor_tour

    def test_complete_rating_flow_ui(self):
        # Full visitor flow (chat + rating + close): session ends inactive.
        self.start_tour("/", 'website_livechat_complete_flow_tour')
        self._check_end_of_rating_tours()

    def test_happy_rating_flow_ui(self):
        self.start_tour("/", 'website_livechat_happy_rating_tour')
        self._check_end_of_rating_tours()

    def test_ok_rating_flow_ui(self):
        self.start_tour("/", 'website_livechat_ok_rating_tour')
        self._check_end_of_rating_tours()

    def test_bad_rating_flow_ui(self):
        self.start_tour("/", 'website_livechat_sad_rating_tour')
        self._check_end_of_rating_tours()

    def test_no_rating_flow_ui(self):
        # Closing the chat window without rating must still end the session.
        self.start_tour("/", 'website_livechat_no_rating_tour')
        channel = self.env['mail.channel'].search([('livechat_visitor_id', '=', self.visitor_tour.id)])
        self.assertEqual(len(channel), 1, "There can only be one channel created for 'Visitor Tour'.")
        self.assertEqual(channel.livechat_active, False, 'Livechat must be inactive after closing the chat window.')

    def test_no_rating_no_close_flow_ui(self):
        # Chat window left open: the livechat session stays active.
        self.start_tour("/", 'website_livechat_no_rating_no_close_tour')
        channel = self.env['mail.channel'].search([('livechat_visitor_id', '=', self.visitor_tour.id)])
        self.assertEqual(len(channel), 1, "There can only be one channel created for 'Visitor Tour'.")
        self.assertEqual(channel.livechat_active, True, 'Livechat must be active while the chat window is not closed.')

    def test_empty_chat_request_flow_no_rating_no_close_ui(self):
        """A visitor-initiated session takes priority over a pending (still
        message-less) operator chat request, which must end up cancelled."""
        # Open an empty chat request
        self.visitor_tour.with_user(self.operator).action_send_chat_request()
        chat_request = self.env['mail.channel'].search([('livechat_visitor_id', '=', self.visitor_tour.id), ('livechat_active', '=', True)])
        # Visitor ask a new livechat session before the operator start to send message in chat request session
        self.start_tour("/", 'website_livechat_no_rating_no_close_tour')
        # Visitor's session must be active (gets the priority)
        channel = self.env['mail.channel'].search([('livechat_visitor_id', '=', self.visitor_tour.id), ('livechat_active', '=', True)])
        self.assertEqual(len(channel), 1, "There can only be one channel created for 'Visitor Tour'.")
        self.assertEqual(channel.livechat_active, True, 'Livechat must be active while the chat window is not closed.')
        # Check that the chat request has been canceled.
        chat_request.invalidate_cache()
        self.assertEqual(chat_request.livechat_active, False, "The livechat request must be inactive as the visitor started himself a livechat session.")

    def test_chat_request_flow_with_rating_ui(self):
        """When the operator did send a message, the visitor continues in the
        chat-request channel itself, then rates it at the end."""
        # Open a chat request
        self.visitor_tour.with_user(self.operator).action_send_chat_request()
        chat_request = self.env['mail.channel'].search([('livechat_visitor_id', '=', self.visitor_tour.id), ('livechat_active', '=', True)])
        # Operator send a message to the visitor
        self._send_message(chat_request, self.operator.email, "Hello my friend !", author_id=self.operator.partner_id.id)
        self.assertEqual(len(chat_request.message_ids), 1, "Number of messages incorrect.")
        # Visitor comes to the website and receives the chat request
        self.start_tour("/", 'website_livechat_chat_request_part_1_no_close_tour')
        # Check that the current session is the chat request
        channel = self.env['mail.channel'].search([('livechat_visitor_id', '=', self.visitor_tour.id), ('livechat_active', '=', True)])
        self.assertEqual(len(channel), 1, "There can only be one channel created for 'Visitor Tour'.")
        self.assertEqual(channel, chat_request, "The active livechat session must be the chat request one.")
        # Visitor reload the page and continues the chat with the operator normally
        self.start_tour("/", 'website_livechat_chat_request_part_2_end_session_tour')
        self._check_end_of_rating_tours()

    def _check_end_of_rating_tours(self):
        # Shared post-condition of the rating tours: exactly one channel,
        # and the session is no longer active.
        channel = self.env['mail.channel'].search([('livechat_visitor_id', '=', self.visitor_tour.id)])
        self.assertEqual(len(channel), 1, "There can only be one channel created for 'Visitor Tour'.")
        self.assertEqual(channel.livechat_active, False, 'Livechat must be inactive after rating.')
| 56.885057 | 4,949 |
4,325 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, tests
class TestLivechatCommon(tests.TransactionCase):
    """Shared fixtures for website-livechat tests: one operator, one livechat
    channel, a pool of visitors, the livechat HTTP endpoints, plus patches
    pinning operator availability and the "current request" visitor."""

    def setUp(self):
        super(TestLivechatCommon, self).setUp()
        # Fixed reference datetime so tests do not depend on the wall clock.
        self.base_datetime = fields.Datetime.from_string("2019-11-11 21:30:00")

        self.group_user = self.env.ref('base.group_user')
        self.group_livechat_user = self.env.ref('im_livechat.im_livechat_group_user')
        # The single operator, member of both internal-user and livechat groups.
        self.operator = self.env['res.users'].create({
            'name': 'Operator Michel',
            'login': 'operator',
            'email': '[email protected]',
            'password': "ideboulonate",
            'livechat_username': 'El Deboulonnator',
            'groups_id': [(6, 0, [
                self.group_user.id,
                self.group_livechat_user.id,
            ])],
        })

        self.livechat_channel = self.env['im_livechat.channel'].create({
            'name': 'The basic channel',
            'user_ids': [(6, 0, [self.operator.id])]
        })

        self.max_sessions_per_operator = 5

        visitor_vals = {
            'lang_id': self.env.ref('base.lang_en').id,
            'country_id': self.env.ref('base.be').id,
            'website_id': self.env.ref('website.default_website').id,
        }
        # First visitor is linked to the demo partner; the others are anonymous
        # clones of visitor_vals (one per potential concurrent session).
        self.visitors = self.env['website.visitor'].create([{
            'lang_id': self.env.ref('base.lang_en').id,
            'country_id': self.env.ref('base.de').id,
            'website_id': self.env.ref('website.default_website').id,
            'partner_id': self.env.ref('base.user_demo').partner_id.id,
        }] + [visitor_vals]*self.max_sessions_per_operator)
        self.visitor_demo, self.visitor = self.visitors[0], self.visitors[1]

        # Livechat HTTP endpoints exercised by the tests.
        base_url = self.livechat_channel.get_base_url()
        self.open_chat_url = base_url + "/im_livechat/get_session"
        self.open_chat_params = {'params': {
            'channel_id': self.livechat_channel.id,
            'anonymous_name': "Wrong Name"
        }}
        self.send_feedback_url = base_url + "/im_livechat/feedback"
        self.leave_session_url = base_url + "/im_livechat/visitor_leave_session"
        self.message_info_url = base_url + "/mail/init_messaging"

        # override the get_available_users to return only Michel as available
        operators = self.operator
        def get_available_users(self):
            return operators
        self.patch(type(self.env['im_livechat.channel']), '_get_available_users', get_available_users)

        # override the _get_visitor_from_request to return self.visitor;
        # tests may re-point self.target_visitor to another visitor record.
        self.target_visitor = self.visitor
        def get_visitor_from_request(self_mock, **kwargs):
            return self.target_visitor
        self.patch(type(self.env['website.visitor']), '_get_visitor_from_request', get_visitor_from_request)

    def _send_message(self, channel, email_from, body, author_id=False):
        # As bus is unavailable in test mode, we cannot call /mail/chat_post route to post a message.
        # Instead, we post directly the message on the given channel.
        channel.with_context(mail_create_nosubscribe=True) \
            .message_post(author_id=author_id, email_from=email_from, body=body,
                          message_type='comment', subtype_id=self.env.ref('mail.mt_comment').id)

    def _send_rating(self, channel, visitor, rating_value, reason=False):
        """Post a rating (1, 3 or 5) through the feedback route, then post the
        matching visitor message, mimicking what the widget does.

        NOTE(review): relies on ``self.opener`` (HTTP session), normally
        provided by an HttpCase subclass — confirm in concrete test classes.
        """
        channel_messages_count = len(channel.message_ids)
        rating_to_emoji = {1: "😞", 3: "😐", 5: "😊"}
        self.opener.post(url=self.send_feedback_url, json={'params': {
            'uuid': channel.uuid,
            'rate': rating_value,
            'reason': reason,
        }})
        # Check the rating record created by the feedback route.
        res_model_id = self.env['ir.model'].sudo().search([('model', '=', channel._name)], limit=1).id
        rating = self.env['rating.rating'].search([('res_id', '=', channel.id), ('res_model_id', '=', res_model_id)])
        self.assertEqual(rating.rating, rating_value, "The rating is not correct.")
        message = "Rating: %s" % rating_to_emoji[rating_value]
        if reason:
            message += " \n%s" % reason
        self._send_message(channel, visitor.display_name, message, author_id=False)
        self.assertEqual(len(channel.message_ids), channel_messages_count + 1)
| 45.914894 | 4,316 |
371 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
class IrHttp(models.AbstractModel):
    _inherit = 'ir.http'

    @classmethod
    def _get_translation_frontend_modules_name(cls):
        """Expose im_livechat's translations to the website frontend."""
        modules = super(IrHttp, cls)._get_translation_frontend_modules_name()
        return [*modules, 'im_livechat']
| 28.538462 | 371 |
804 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, fields
from odoo.addons.http_routing.models.ir_http import slug
from odoo.tools.translate import html_translate
class ImLivechatChannel(models.Model):
    _name = 'im_livechat.channel'
    _inherit = ['im_livechat.channel', 'website.published.mixin']

    # Rich-text description displayed on the channel's public website page.
    website_description = fields.Html("Website description", default=False, help="Description of the channel displayed on the website page", sanitize_attributes=False, translate=html_translate, sanitize_form=False)

    def _compute_website_url(self):
        """Point the published URL at the channel's public livechat page."""
        super(ImLivechatChannel, self)._compute_website_url()
        for record in self:
            record.website_url = "/livechat/channel/%s" % (slug(record),)
| 42.315789 | 804 |
1,380 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, _
class ImLivechatChannel(models.Model):
    _inherit = 'im_livechat.channel'

    def _get_livechat_mail_channel_vals(self, anonymous_name, operator, user_id=None, country_id=None):
        """Override: attach the current website.visitor to the new livechat
        channel values, and cancel any pending operator-initiated chat request
        for that visitor (the visitor-initiated session takes priority).

        :return: dict of mail.channel values built by the base method, possibly
            extended with ``livechat_visitor_id`` and ``anonymous_name``.
        """
        mail_channel_vals = super(ImLivechatChannel, self)._get_livechat_mail_channel_vals(anonymous_name, operator, user_id=user_id, country_id=country_id)
        visitor_sudo = self.env['website.visitor']._get_visitor_from_request()
        if visitor_sudo:
            mail_channel_vals['livechat_visitor_id'] = visitor_sudo.id
            # Anonymous (not logged-in) visitor: show display name + country
            # instead of the generic anonymous name.
            if not user_id:
                mail_channel_vals['anonymous_name'] = visitor_sudo.display_name + (' (%s)' % visitor_sudo.country_id.name if visitor_sudo.country_id else '')
            # As chat requested by the visitor, delete the chat requested by an operator if any to avoid conflicts between two flows
            # TODO DBE : Move this into the proper method (open or init mail channel)
            chat_request_channel = self.env['mail.channel'].sudo().search([('livechat_visitor_id', '=', visitor_sudo.id), ('livechat_active', '=', True)])
            for mail_channel in chat_request_channel:
                mail_channel._close_livechat_session(cancel=True, operator=operator.name)
        return mail_channel_vals
| 60 | 1,380 |
356 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class ResConfigSettings(models.TransientModel):
    _inherit = 'res.config.settings'

    # Livechat channel of the currently-edited website; related field stored
    # on website.website, made editable from the settings form.
    channel_id = fields.Many2one('im_livechat.channel', string='Website Live Channel', related='website_id.channel_id', readonly=False)
| 35.6 | 356 |
2,964 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models, _
from odoo.addons.http_routing.models.ir_http import url_for
class Website(models.Model):
    _inherit = "website"

    channel_id = fields.Many2one('im_livechat.channel', string='Website Live Chat Channel')

    def get_livechat_channel_info(self):
        """ Get the livechat info dict (button text, channel name, ...) for the livechat channel of
        the current website.

        :return: the channel's livechat info, extended with the pending
            chat-request session (if any); ``{}`` when no channel is set.
        """
        self.ensure_one()
        if self.channel_id:
            livechat_info = self.channel_id.sudo().get_livechat_info()
            if livechat_info['available']:
                livechat_request_session = self._get_livechat_request_session()
                if livechat_request_session:
                    livechat_info['options']['chat_request_session'] = livechat_request_session
            return livechat_info
        return {}

    def _get_livechat_request_session(self):
        """
        Check if there is an opened chat request for the website livechat channel and the current visitor (from request).
        If so, prepare the livechat session information that will be stored in visitor's cookies
        and used by livechat widget to directly open this session instead of allowing the visitor to
        initiate a new livechat session.
        :return: {dict} livechat request session information (empty if none)
        """
        visitor = self.env['website.visitor']._get_visitor_from_request()
        if visitor:
            # get active chat_request linked to visitor
            chat_request_channel = self.env['mail.channel'].sudo().search([
                ('livechat_visitor_id', '=', visitor.id),
                ('livechat_channel_id', '=', self.channel_id.id),
                ('livechat_active', '=', True),
                ('has_message', '=', True)
            ], order='create_date desc', limit=1)
            if chat_request_channel:
                return {
                    "folded": False,
                    "id": chat_request_channel.id,
                    # [partner id, displayed name, livechat username] — shape
                    # expected by the livechat widget.
                    "operator_pid": [
                        chat_request_channel.livechat_operator_id.id,
                        chat_request_channel.livechat_operator_id.user_livechat_username or chat_request_channel.livechat_operator_id.display_name,
                        chat_request_channel.livechat_operator_id.user_livechat_username,
                    ],
                    "name": chat_request_channel.name,
                    "uuid": chat_request_channel.uuid,
                    "type": "chat_request"
                }
        return {}

    def get_suggested_controllers(self):
        # Advertise the public /livechat page among the website's suggestions.
        suggested_controllers = super(Website, self).get_suggested_controllers()
        suggested_controllers.append((_('Live Support'), url_for('/livechat'), 'website_livechat'))
        return suggested_controllers
| 46.3125 | 2,964 |
3,881 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import AccessError
class MailChannel(models.Model):
    _inherit = 'mail.channel'

    livechat_visitor_id = fields.Many2one('website.visitor', string='Visitor')

    def _execute_channel_pin(self, pinned=False):
        """ Override to clean an empty livechat channel.

        This is typically called when the operator send a chat request to a website.visitor
        but don't speak to him and closes the chatter.
        This allows operators to send the visitor a new chat request.
        If active empty livechat channel, delete mail_channel as not useful
        to keep empty chat.
        """
        super(MailChannel, self)._execute_channel_pin(pinned)
        if self.livechat_active and not self.message_ids:
            self.sudo().unlink()

    def channel_info(self):
        """
        Override to add visitor information on the mail channel infos.
        This will be used to display a banner with visitor informations
        at the top of the livechat channel discussion view in discuss module.
        """
        channel_infos = super().channel_info()
        channel_infos_dict = dict((c['id'], c) for c in channel_infos)
        for channel in self.filtered('livechat_visitor_id'):
            visitor = channel.livechat_visitor_id
            try:
                channel_infos_dict[channel.id]['visitor'] = {
                    'display_name': visitor.display_name,
                    'country_code': visitor.country_id.code.lower() if visitor.country_id else False,
                    'country_id': visitor.country_id.id,
                    'id': visitor.id,
                    'is_connected': visitor.is_connected,
                    'history': self.sudo()._get_visitor_history(visitor),
                    'website_name': visitor.website_id.name,
                    'lang_name': visitor.lang_id.name,
                    'partner_id': visitor.partner_id.id,
                }
            except AccessError:
                # The current user may not read the visitor: silently skip the
                # banner instead of breaking the whole channel_info payload.
                pass
        return list(channel_infos_dict.values())

    def _get_visitor_history(self, visitor):
        """
        Prepare history string to render it in the visitor info div on discuss livechat channel view.
        :param visitor: website.visitor of the channel
        :return: arrow separated string containing navigation history information
        """
        # NOTE(review): relies on website.track's default ordering to get the
        # 3 most recent visits; reversed() puts them back in chronological order.
        recent_history = self.env['website.track'].search([('page_id', '!=', False), ('visitor_id', '=', visitor.id)], limit=3)
        return ' → '.join(visit.page_id.name + ' (' + visit.visit_datetime.strftime('%H:%M') + ')' for visit in reversed(recent_history))

    def _get_visitor_leave_message(self, operator=False, cancel=False):
        """Build the odoobot message posted when a livechat session ends.

        :param operator: operator name, used in the cancellation message
        :param cancel: True when the session is a cancelled chat request
        """
        name = _('The visitor') if not self.livechat_visitor_id else self.livechat_visitor_id.display_name
        if cancel:
            message = _("""%s has started a conversation with %s.
                        The chat request has been canceled.""") % (name, operator or _('an operator'))
        else:
            message = _('%s has left the conversation.', name)
        return message

    @api.returns('mail.message', lambda value: value.id)
    def message_post(self, **kwargs):
        """Override to mark the visitor as still connected.
        If the message sent is not from the operator (so if it's the visitor or
        odoobot sending closing chat notification, the visitor last action date is updated."""
        message = super(MailChannel, self).message_post(**kwargs)
        message_author_id = message.author_id
        visitor = self.livechat_visitor_id
        # Only for a single channel with a visitor, and only when the author
        # is not the operator himself.
        if len(self) == 1 and visitor and message_author_id != self.livechat_operator_id:
            visitor._update_visitor_last_visit()
        return message
| 48.4875 | 3,879 |
6,840 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
import json
from odoo import models, api, fields, _
from odoo.exceptions import UserError
from odoo.http import request
from odoo.tools.sql import column_exists, create_column
class WebsiteVisitor(models.Model):
    _inherit = 'website.visitor'

    # Operator currently speaking with the visitor (from the active livechat).
    livechat_operator_id = fields.Many2one('res.partner', compute='_compute_livechat_operator_id', store=True, string='Speaking with')
    livechat_operator_name = fields.Char('Operator Name', related="livechat_operator_id.name")
    mail_channel_ids = fields.One2many('mail.channel', 'livechat_visitor_id',
                                       string="Visitor's livechat channels", readonly=True)
    session_count = fields.Integer('# Sessions', compute="_compute_session_count")

    def _auto_init(self):
        # Skip the computation of the field `livechat_operator_id` at the module installation
        # We can assume no livechat operator attributed to visitor if it was not installed
        if not column_exists(self.env.cr, "website_visitor", "livechat_operator_id"):
            create_column(self.env.cr, "website_visitor", "livechat_operator_id", "int4")
        return super()._auto_init()

    @api.depends('mail_channel_ids.livechat_active', 'mail_channel_ids.livechat_operator_id')
    def _compute_livechat_operator_id(self):
        """Map each visitor to the operator of its active livechat, if any."""
        results = self.env['mail.channel'].search_read(
            [('livechat_visitor_id', 'in', self.ids), ('livechat_active', '=', True)],
            ['livechat_visitor_id', 'livechat_operator_id']
        )
        # NOTE(review): assumes every active livechat channel has an operator
        # set (search_read returns (id, name) pairs); if livechat_operator_id
        # were False, [0] would raise — confirm this invariant.
        visitor_operator_map = {int(result['livechat_visitor_id'][0]): int(result['livechat_operator_id'][0]) for result in results}
        for visitor in self:
            visitor.livechat_operator_id = visitor_operator_map.get(visitor.id, False)

    @api.depends('mail_channel_ids')
    def _compute_session_count(self):
        """Count livechat sessions per visitor, ignoring empty channels."""
        sessions = self.env['mail.channel'].search([('livechat_visitor_id', 'in', self.ids)])
        session_count = dict.fromkeys(self.ids, 0)
        for session in sessions.filtered(lambda c: c.message_ids):
            session_count[session.livechat_visitor_id.id] += 1
        for visitor in self:
            visitor.session_count = session_count.get(visitor.id, 0)

    def action_send_chat_request(self):
        """ Send a chat request to website_visitor(s).
        This creates a chat_request and a mail_channel with livechat active flag.
        But for the visitor to get the chat request, the operator still has to speak to the visitor.
        The visitor will receive the chat request the next time he navigates to a website page.
        (see _handle_webpage_dispatch for next step)"""
        # check if visitor is available
        unavailable_visitors_count = self.env['mail.channel'].search_count([('livechat_visitor_id', 'in', self.ids), ('livechat_active', '=', True)])
        if unavailable_visitors_count:
            raise UserError(_('Recipients are not available. Please refresh the page to get latest visitors status.'))
        # check if user is available as operator
        for website in self.mapped('website_id'):
            if not website.channel_id:
                raise UserError(_('No Livechat Channel allows you to send a chat request for website %s.', website.name))
        # Ensure the current user is an operator of every relevant channel.
        self.website_id.channel_id.write({'user_ids': [(4, self.env.user.id)]})
        # Create chat_requests and linked mail_channels
        mail_channel_vals_list = []
        for visitor in self:
            operator = self.env.user
            country = visitor.country_id
            visitor_name = "%s (%s)" % (visitor.display_name, country.name) if country else visitor.display_name
            channel_partner_to_add = [(4, operator.partner_id.id)]
            if visitor.partner_id:
                channel_partner_to_add.append((4, visitor.partner_id.id))
            else:
                # Anonymous visitor: represent him with the public partner.
                channel_partner_to_add.append((4, self.env.ref('base.public_partner').id))
            mail_channel_vals_list.append({
                'channel_partner_ids': channel_partner_to_add,
                'livechat_channel_id': visitor.website_id.channel_id.id,
                'livechat_operator_id': self.env.user.partner_id.id,
                'channel_type': 'livechat',
                'public': 'private',
                'country_id': country.id,
                'anonymous_name': visitor_name,
                'name': ', '.join([visitor_name, operator.livechat_username if operator.livechat_username else operator.name]),
                'livechat_visitor_id': visitor.id,
                'livechat_active': True,
            })
        if mail_channel_vals_list:
            mail_channels = self.env['mail.channel'].create(mail_channel_vals_list)
            # Open empty chatter to allow the operator to start chatting with the visitor.
            channel_members = self.env['mail.channel.partner'].sudo().search([
                ('partner_id', '=', self.env.user.partner_id.id),
                ('channel_id', 'in', mail_channels.ids),
            ])
            channel_members.write({
                'fold_state': 'open',
                'is_minimized': True,
            })
            mail_channels_info = mail_channels.channel_info()
            notifications = []
            for mail_channel_info in mail_channels_info:
                # NOTE(review): 'operator' is the loop variable reused after the
                # loop — always self.env.user here, so this is safe but fragile.
                notifications.append([operator.partner_id, 'website_livechat.send_chat_request', mail_channel_info])
            self.env['bus.bus']._sendmany(notifications)

    def _link_to_visitor(self, target, keep_unique=True):
        """ Copy sessions of the secondary visitors to the main partner visitor. """
        if target.partner_id:
            target.mail_channel_ids |= self.mail_channel_ids
        super(WebsiteVisitor, self)._link_to_visitor(target, keep_unique=keep_unique)

    def _link_to_partner(self, partner, update_values=None):
        """ Adapt partner in members of related livechats """
        if partner:
            # Replace the public partner placeholder by the real partner.
            self.mail_channel_ids.channel_partner_ids = [
                (3, self.env.ref('base.public_partner').id),
                (4, partner.id),
            ]
        super(WebsiteVisitor, self)._link_to_partner(partner, update_values=update_values)

    def _create_visitor(self):
        """Override: re-attach an ongoing livechat session (from the
        im_livechat_session cookie) to the freshly created visitor."""
        visitor = super(WebsiteVisitor, self)._create_visitor()
        mail_channel_uuid = json.loads(request.httprequest.cookies.get('im_livechat_session', '{}')).get('uuid')
        if mail_channel_uuid:
            mail_channel = request.env["mail.channel"].sudo().search([("uuid", "=", mail_channel_uuid)])
            mail_channel.write({
                'livechat_visitor_id': visitor.id,
                'anonymous_name': visitor.display_name
            })
        return visitor
| 53.858268 | 6,840 |
930 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.http import Controller, request, route
class TestBusController(Controller):
    """Test-only helper controller.

    The bus is unavailable while the registry runs in test mode, but the
    browser side has no way to know that. Tours call this route to decide
    whether they must mock bus-driven behaviour (e.g. fake a 'message added'
    notification in the livechat chatter instead of waiting for the bus).
    """

    @route('/bus/test_mode_activated', type="json", auth="public")
    def is_test_mode_activated(self):
        registry = request.registry
        return registry.in_test_mode()
| 48.947368 | 930 |
3,489 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import http, _
from odoo.http import request
from odoo.addons.im_livechat.controllers.main import LivechatController
class WebsiteLivechat(LivechatController):

    @http.route('/livechat', type='http', auth="public", website=True, sitemap=True)
    def channel_list(self, **kw):
        """Public page listing all published livechat channels."""
        # display the list of the channel
        channels = request.env['im_livechat.channel'].search([('website_published', '=', True)])
        values = {
            'channels': channels
        }
        return request.render('website_livechat.channel_list_page', values)

    @http.route('/livechat/channel/<model("im_livechat.channel"):channel>', type='http', auth='public', website=True, sitemap=True)
    def channel_rating(self, channel, **kw):
        """Public rating page of one channel: last 100 ratings, overall grade
        repartition, and per-operator rating percentages."""
        # get the last 100 ratings and the repartition per grade
        domain = [
            ('res_model', '=', 'mail.channel'), ('res_id', 'in', channel.sudo().channel_ids.ids),
            ('consumed', '=', True), ('rating', '>=', 1),
        ]
        ratings = request.env['rating.rating'].sudo().search(domain, order='create_date desc', limit=100)
        repartition = channel.sudo().channel_ids.rating_get_grades(domain=domain)
        # compute percentage
        percentage = dict.fromkeys(['great', 'okay', 'bad'], 0)
        for grade in repartition:
            percentage[grade] = round(repartition[grade] * 100.0 / sum(repartition.values()), 1) if sum(repartition.values()) else 0
        # filter only on the team users that worked on the last 100 ratings and get their detailed stat
        ratings_per_partner = {partner_id: dict(great=0, okay=0, bad=0)
                               for partner_id in ratings.mapped('rated_partner_id.id')}
        total_ratings_per_partner = dict.fromkeys(ratings.mapped('rated_partner_id.id'), 0)
        # keep 10 for backward compatibility
        rating_texts = {10: 'great', 5: 'great', 3: 'okay', 1: 'bad'}
        for rating in ratings:
            partner_id = rating.rated_partner_id.id
            ratings_per_partner[partner_id][rating_texts[rating.rating]] += 1
            total_ratings_per_partner[partner_id] += 1
        # convert each operator's counters into percentages of his own total
        for partner_id, rating in ratings_per_partner.items():
            for k, v in ratings_per_partner[partner_id].items():
                ratings_per_partner[partner_id][k] = round(100 * v / total_ratings_per_partner[partner_id], 1)
        # the value dict to render the template
        values = {
            'main_object': channel,
            'channel': channel,
            'ratings': ratings,
            'team': channel.sudo().user_ids,
            'percentage': percentage,
            'ratings_per_user': ratings_per_partner
        }
        return request.render("website_livechat.channel_page", values)

    @http.route('/im_livechat/get_session', type="json", auth='public', cors="*")
    def get_session(self, channel_id, anonymous_name, previous_operator_id=None, **kwargs):
        """ Override to use visitor name instead of 'Visitor' whenever a visitor start a livechat session. """
        visitor_sudo = request.env['website.visitor']._get_visitor_from_request()
        if visitor_sudo:
            # Render the visitor's display name in his own language.
            anonymous_name = visitor_sudo.with_context(lang=visitor_sudo.lang_id.code).display_name
        return super(WebsiteLivechat, self).get_session(channel_id, anonymous_name, previous_operator_id=previous_operator_id, **kwargs)
| 51.308824 | 3,489 |
871 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
    'name': 'Quiz on Live Event Tracks',
    'category': 'Hidden',  # technical bridge module, hidden from the Apps list
    'version': '1.0',
    'summary': 'Bridge module to support quiz features during "live" tracks. ',
    'website': 'https://www.odoo.com/app/events',
    'description': "",
    'depends': [
        'website_event_track_live',
        'website_event_track_quiz',
    ],
    'data': [
        'views/event_track_templates_page.xml',
    ],
    'demo': [
    ],
    'application': False,
    'installable': True,
    # Installed automatically as soon as both 'depends' modules are installed.
    'auto_install': True,
    'assets': {
        'web.assets_frontend': [
            'website_event_track_live_quiz/static/src/js/**/*',
        ],
        'web.assets_qweb': [
            'website_event_track_live_quiz/static/src/xml/**/*',
        ],
    },
    'license': 'LGPL-3',
}
| 26.393939 | 871 |
564 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.website_event_track_live.controllers.track_live import EventTrackLiveController
class EventTrackLiveQuizController(EventTrackLiveController):

    def _prepare_track_suggestion_values(self, track, track_suggestion):
        """Extend the suggestion payload with a 'show_quiz' flag on the
        current track (quiz exists and is not completed yet)."""
        values = super(EventTrackLiveQuizController, self)._prepare_track_suggestion_values(track, track_suggestion)
        has_pending_quiz = bool(track.quiz_id) and not track.is_quiz_completed
        values['current_track']['show_quiz'] = has_pending_quiz
        return values
| 47 | 564 |
379 |
py
|
PYTHON
|
15.0
|
{
    'name': 'Website Sale Delivery Giftcard',
    'category': 'Website/Website',
    'version': '1.0',
    'depends': ['website_sale_delivery', 'website_sale_gift_card'],
    'installable': True,
    # Bridge module: installed automatically with both dependencies.
    'auto_install': True,
    'assets': {
        'web.assets_tests': [
            'website_sale_delivery_giftcard/static/tests/**/*',
        ],
    },
    'license': 'LGPL-3',
}
| 27.071429 | 379 |
1,917 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import HttpCase
from odoo.tests import tagged
@tagged('post_install', '-at_install')
class TestWebsiteSaleDelivery(HttpCase):
    """Tour test: paying part of an eCommerce order with a gift card must not
    reset the delivery method chosen by the customer."""

    def setUp(self):
        super().setUp()
        # Published product the tour puts in the cart.
        self.env['product.product'].create({
            'name': 'Acoustic Bloc Screens',
            'list_price': 2950.0,
            'website_published': True,
        })
        # Gift card large enough to cover the whole order.
        self.gift_card = self.env['gift.card'].create({
            'initial_amount': 10000,
            'code': '123456',
        })
        # Service products backing the two delivery carriers below.
        self.product_delivery_normal1 = self.env['product.product'].create({
            'name': 'Normal Delivery Charges',
            'invoice_policy': 'order',
            'type': 'service',
        })
        self.product_delivery_normal2 = self.env['product.product'].create({
            'name': 'Normal Delivery Charges',
            'invoice_policy': 'order',
            'type': 'service',
        })
        # Two fixed-price carriers so the checkout offers a real choice.
        self.normal_delivery = self.env['delivery.carrier'].create({
            'name': 'delivery1',
            'fixed_price': 5,
            'delivery_type': 'fixed',
            'website_published': True,
            'product_id': self.product_delivery_normal1.id,
        })
        self.normal_delivery2 = self.env['delivery.carrier'].create({
            'name': 'delivery2',
            'fixed_price': 10,
            'delivery_type': 'fixed',
            'website_published': True,
            'product_id': self.product_delivery_normal2.id,
        })

    def test_shop_sale_gift_card_keep_delivery(self):
        # get admin user and set his preferred shipping method to normal delivery
        admin_user = self.env.ref('base.user_admin')
        admin_user.partner_id.write({'property_delivery_carrier_id': self.normal_delivery.id})
        self.start_tour("/", 'shop_sale_giftcard_delivery', login='admin')
| 34.854545 | 1,917 |
1,444 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
"name": "Finnish Localization",
"version": "13.0.1",
"author": "Avoin.Systems, "
"Tawasta, "
"Vizucom, "
"Sprintit",
"category": "Accounting/Localizations/Account Charts",
"description": """
This is the Odoo module to manage the accounting in Finland.
============================================================
After installing this module, you'll have access to :
* Finnish chart of account
* Fiscal positions
* Invoice Payment Reference Types (Finnish Standard Reference & Finnish Creditor Reference (RF))
* Finnish Reference format for Sale Orders
Set the payment reference type from the Sales Journal.
""",
"depends": [
'account',
'base_iban',
'base_vat',
],
"data": [
'data/account_account_tag_data.xml',
'data/account_chart_template_data.xml',
'data/account.account.template.csv',
'data/account_tax_report_line.xml',
'data/account_tax_group_data.xml',
'data/account_tax_template_data.xml',
'data/l10n_fi_chart_post_data.xml',
'data/account_fiscal_position_template_data.xml',
'data/account_chart_template_configuration_data.xml'
],
"demo": [
'demo/demo_company.xml',
],
"installable": True,
'license': 'LGPL-3',
}
| 32.088889 | 1,444 |
1,742 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.account.tests.common import AccountTestInvoicingCommon
from odoo.tests import tagged
@tagged('post_install_l10n', 'post_install', '-at_install')
class InvoiceGetReferenceTest(AccountTestInvoicingCommon):
    """Posting an invoice configured with one of the Finnish payment-reference
    models ('fi' or 'fi_rf') must fill in the payment reference."""

    @classmethod
    def setUpClass(cls, chart_template_ref='l10n_fi.fi_chart_template'):
        super().setUpClass(chart_template_ref=chart_template_ref)
        cls.invoice = cls.init_invoice('out_invoice', products=cls.product_a + cls.product_b)

    def _post_and_check_reference(self, reference_model, reference_type=None):
        # A draft invoice has no payment reference; posting must set one.
        self.assertFalse(self.invoice.payment_reference)
        if reference_type:
            self.invoice.journal_id.invoice_reference_type = reference_type
        self.invoice.journal_id.invoice_reference_model = reference_model
        self.invoice.action_post()
        self.assertTrue(self.invoice.payment_reference)

    def test_get_reference_finnish_invoice(self):
        self._post_and_check_reference('fi')

    def test_get_reference_finnish_partner(self):
        self._post_and_check_reference('fi', reference_type='partner')

    def test_get_reference_finnish_rf_invoice(self):
        self._post_and_check_reference('fi_rf')

    def test_get_reference_finnish_rf_partner(self):
        self._post_and_check_reference('fi_rf', reference_type='partner')
| 44.666667 | 1,742 |
2,133 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import tagged
from odoo.addons.account.tests.common import AccountTestInvoicingCommon
from odoo.exceptions import UserError
@tagged('post_install_l10n', 'post_install', '-at_install')
class PaymentReferenceTest(AccountTestInvoicingCommon):
    """
    All references validated with the reference calculator by Nordea Bank
    http://www.nordea.fi/en/corporate-customers/payments/invoicing-and-payments/reference-number-calculator.html
    """

    @classmethod
    def setUpClass(cls, chart_template_ref='l10n_fi.fi_chart_template'):
        super().setUpClass(chart_template_ref=chart_template_ref)
        cls.invoice = cls.init_invoice('out_invoice', products=cls.product_a+cls.product_b)

    def test_payment_reference_fi(self):
        # Bound method alias: Finnish standard reference (base + check digit).
        compute = self.invoice.compute_payment_reference_finnish
        # Common
        self.assertEqual('1232', compute('INV123'))
        self.assertEqual('1326', compute('132'))
        self.assertEqual('1290', compute('ABC1B2B9C'))
        # Insufficient (base shorter than 3 digits gets left-padded with '1's)
        self.assertEqual('1119', compute('-1'))
        self.assertEqual('1106', compute('0'))
        self.assertEqual('1261', compute('26'))
        # Excess length (base truncated to 19 digits)
        self.assertEqual('12345678901234567894', compute('123456789012345678901234567890'))
        # Invalid (no digit at all)
        with self.assertRaises(UserError):
            compute('QWERTY')

    def test_payment_reference_rf(self):
        # Bound method alias: international RF (ISO 11649) creditor reference.
        compute = self.invoice.compute_payment_reference_finnish_rf
        # Common
        self.assertEqual('RF111232', compute('INV123'))
        self.assertEqual('RF921326', compute('132'))
        self.assertEqual('RF941290', compute('ABC1B2B9C'))
        # Insufficient
        self.assertEqual('RF551119', compute('-1'))
        self.assertEqual('RF181106', compute('0'))
        self.assertEqual('RF041261', compute('26'))
        # Excess length
        self.assertEqual('RF0912345678901234567894', compute('123456789012345678901234567890'))
        # Invalid
        with self.assertRaises(UserError):
            compute('QWERTY')
| 35.55 | 2,133 |
3,035 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
from odoo import api, models, _
from odoo.exceptions import UserError
import logging
log = logging.getLogger(__name__)
class AccountInvoiceFinnish(models.Model):
    """Finnish payment reference numbers for customer invoices.

    Implements the Finnish national reference (viitenumero) and the
    ISO 11649 'RF' creditor reference built on top of it. The
    ``_get_invoice_reference_*`` hooks plug into the standard
    ``account.move`` reference computation dispatch.
    """
    _inherit = 'account.move'

    @api.model
    def number2numeric(self, number):
        """Return the numeric base of *number* for reference computation.

        Strips every non-digit character, then clamps the result to the
        3..19 digit length required by the Finnish reference format.

        :param number: free-form invoice number, e.g. ``'INV123'``
        :raises UserError: if *number* contains no digit at all
        """
        invoice_number = re.sub(r'\D', '', number)
        # re.sub always returns a str, so an empty string is the only
        # "no digits" outcome (the former `is False` check was dead code).
        if not invoice_number:
            raise UserError(_('Invoice number must contain numeric characters'))
        # Make sure the base number is 3...19 characters long
        if len(invoice_number) < 3:
            # Prefix with '1's so short numbers still reach 3 digits.
            invoice_number = ('11' + invoice_number)[-3:]
        elif len(invoice_number) > 19:
            invoice_number = invoice_number[:19]
        return invoice_number

    @api.model
    def get_finnish_check_digit(self, base_number):
        """Return the Finnish 7-3-1 check digit for *base_number* as a str."""
        # Multiply digits from end to beginning with 7, 3 and 1 and
        # calculate the sum of the products
        total = sum((7, 3, 1)[idx % 3] * int(val) for idx, val in
                    enumerate(base_number[::-1]))
        # Subtract the sum from the next decade. 10 = 0
        return str((10 - (total % 10)) % 10)

    @api.model
    def get_rf_check_digits(self, base_number):
        """Return the two ISO 11649 check digits for *base_number* as a str."""
        check_base = base_number + 'RF00'
        # 1. Convert all non-digits to digits (A=10 ... Z=35, via ord()-55)
        # 2. Calculate the modulo 97
        # 3. Subtract the remainder from 98
        # 4. Add leading zeros if necessary
        return ''.join(
            ['00', str(98 - (int(''.join(
                [x if x.isdigit() else str(ord(x) - 55) for x in
                 check_base])) % 97))])[-2:]

    @api.model
    def compute_payment_reference_finnish(self, number):
        """Return the Finnish national reference derived from *number*."""
        # Drop all non-numeric characters
        invoice_number = self.number2numeric(number)
        # Calculate the Finnish check digit
        check_digit = self.get_finnish_check_digit(invoice_number)
        return invoice_number + check_digit

    @api.model
    def compute_payment_reference_finnish_rf(self, number):
        """Return the ISO 11649 RF creditor reference derived from *number*."""
        # Drop all non-numeric characters
        invoice_number = self.number2numeric(number)
        # Calculate the Finnish check digit
        invoice_number += self.get_finnish_check_digit(invoice_number)
        # Calculate the RF check digits
        rf_check_digits = self.get_rf_check_digits(invoice_number)
        return 'RF' + rf_check_digits + invoice_number

    # Dispatch hooks: selected by journal invoice_reference_model/_type.

    def _get_invoice_reference_fi_rf_invoice(self):
        # RF reference based on the invoice number.
        self.ensure_one()
        return self.compute_payment_reference_finnish_rf(self.name)

    def _get_invoice_reference_fi_rf_partner(self):
        # RF reference based on the customer id.
        self.ensure_one()
        return self.compute_payment_reference_finnish_rf(str(self.partner_id.id))

    def _get_invoice_reference_fi_invoice(self):
        # National reference based on the invoice number.
        self.ensure_one()
        return self.compute_payment_reference_finnish(self.name)

    def _get_invoice_reference_fi_partner(self):
        # National reference based on the customer id.
        self.ensure_one()
        return self.compute_payment_reference_finnish(str(self.partner_id.id))
| 34.488636 | 3,035 |
517 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields
class AccountJournal(models.Model):
    """Extend journals with the Finnish payment reference models."""
    _inherit = 'account.journal'

    # Add the two Finnish options to the standard selection. The `ondelete`
    # handlers reset affected journals to the default 'odoo' model if this
    # module is uninstalled, so no journal is left with a dangling value.
    invoice_reference_model = fields.Selection(selection_add=[
        ('fi', 'Finnish Standard Reference'),
        ('fi_rf', 'Finnish Creditor Reference (RF)'),
    ], ondelete={'fi': lambda recs: recs.write({'invoice_reference_model': 'odoo'}),
                 'fi_rf': lambda recs: recs.write({'invoice_reference_model': 'odoo'})})
| 39.769231 | 517 |
889 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

# Odoo module manifest for the "Employee Presence Control" module.
{
    'name': 'Employee Presence Control',
    'version': '1.0',
    'category': 'Human Resources',
    'description': """
Control Employees Presence
==========================
Based on:
* The IP Address
* The User's Session
* The Sent Emails
Allows to contact directly the employee in case of unjustified absence.
""",
    # hr_holidays supplies leave/absence data; sms enables the SMS follow-up.
    'depends': ['hr', 'hr_holidays', 'sms'],
    # Security rules first, then data/views loaded in declaration order.
    'data': [
        'security/sms_security.xml',
        'security/ir.model.access.csv',
        'data/ir_actions_server.xml',
        'views/hr_employee_views.xml',
        'data/mail_template_data.xml',
        'data/sms_data.xml',
        'data/ir_cron.xml',
    ],
    'demo': [],
    'installable': True,
    'auto_install': False,
    # Hook run once after installation (defined in the module's __init__).
    'post_init_hook': 'post_init_hook',
    'license': 'LGPL-3',
}
| 26.147059 | 889 |
250 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class ResCompany(models.Model):
    """Track, per company, when employee presence was last computed."""
    _inherit = 'res.company'

    # Timestamp of the last presence computation; `_check_presence` compares
    # its day to today's to decide whether to reset daily presence flags.
    hr_presence_last_compute_date = fields.Datetime()
| 25 | 250 |
8,797 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from ast import literal_eval
from odoo import fields, models, _, api
from odoo.exceptions import UserError
from odoo.fields import Datetime
_logger = logging.getLogger(__name__)
class Employee(models.AbstractModel):
    """Presence-control extensions for employees.

    Infers whether an employee is present from daily signals — a login from
    a recognized IP address, emails sent today, or a manual flag set by a
    manager — and exposes actions to follow up (email/SMS) on employees
    whose presence is still undefined.
    """
    _inherit = 'hr.employee.base'

    # Daily presence signals; all three are reset at the first presence
    # computation of each new day (see `_check_presence`).
    email_sent = fields.Boolean(default=False)
    ip_connected = fields.Boolean(default=False)
    manually_set_present = fields.Boolean(default=False)

    # Stored field used in the presence kanban reporting view
    # to allow group by state.
    hr_presence_state_display = fields.Selection([
        ('to_define', 'To Define'),
        ('present', 'Present'),
        ('absent', 'Absent'),
    ])

    def _compute_presence_state(self):
        """Extend the base computation: promote employees to 'present' when a
        presence signal was recorded today and they are within working hours.
        """
        super()._compute_presence_state()
        # Only employees the base computation did not already mark present.
        employees = self.filtered(lambda e: e.hr_presence_state != 'present' and not e.is_absent)
        company = self.env.company
        # Restrict the (potentially expensive) working-hours lookup to
        # employees with at least one positive presence signal.
        employee_to_check_working = employees.filtered(lambda e:
                                                       not e.is_absent and
                                                       (e.email_sent or e.ip_connected or e.manually_set_present))
        working_now_list = employee_to_check_working._get_employee_working_now()
        for employee in employees:
            # Present only if: not on leave, a computation already ran today
            # (same calendar day), currently within working hours, and at
            # least one of the daily signals is set.
            if not employee.is_absent and company.hr_presence_last_compute_date and employee.id in working_now_list and \
                    company.hr_presence_last_compute_date.day == Datetime.now().day and \
                    (employee.email_sent or employee.ip_connected or employee.manually_set_present):
                employee.hr_presence_state = 'present'

    @api.model
    def _check_presence(self):
        """Recompute today's presence signals for the current company.

        On the first run of a new calendar day, resets all signal flags.
        Then, depending on the `ir.config_parameter` toggles, marks
        employees as connected via IP and/or as having sent enough emails,
        and finally refreshes `hr_presence_state_display` for reporting.
        Intended to be triggered by cron / the "check presence" action.
        """
        company = self.env.company
        # New day (or never computed): wipe yesterday's signals.
        if not company.hr_presence_last_compute_date or \
                company.hr_presence_last_compute_date.day != Datetime.now().day:
            self.env['hr.employee'].search([
                ('company_id', '=', company.id)
            ]).write({
                'email_sent': False,
                'ip_connected': False,
                'manually_set_present': False
            })

        employees = self.env['hr.employee'].search([('company_id', '=', company.id)])
        all_employees = employees

        # Check on IP
        if literal_eval(self.env['ir.config_parameter'].sudo().get_param('hr_presence.hr_presence_control_ip', 'False')):
            # Comma-separated whitelist of office IP addresses on the company.
            ip_list = company.hr_presence_control_ip_list
            ip_list = ip_list.split(',') if ip_list else []
            ip_employees = self.env['hr.employee']
            for employee in employees:
                # IPs logged for this employee's user since midnight today.
                employee_ips = self.env['res.users.log'].search([
                    ('create_uid', '=', employee.user_id.id),
                    ('ip', '!=', False),
                    ('create_date', '>=', Datetime.to_string(Datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)))]
                ).mapped('ip')
                if any(ip in ip_list for ip in employee_ips):
                    ip_employees |= employee
            ip_employees.write({'ip_connected': True})
            # Employees already confirmed by IP skip the email check below.
            employees = employees - ip_employees

        # Check on sent emails
        if literal_eval(self.env['ir.config_parameter'].sudo().get_param('hr_presence.hr_presence_control_email', 'False')):
            email_employees = self.env['hr.employee']
            # Minimum number of messages sent today to count as present.
            threshold = company.hr_presence_control_email_amount
            for employee in employees:
                sent_emails = self.env['mail.message'].search_count([
                    ('author_id', '=', employee.user_id.partner_id.id),
                    ('date', '>=', Datetime.to_string(Datetime.now().replace(hour=0, minute=0, second=0, microsecond=0))),
                    ('date', '<=', Datetime.to_string(Datetime.now()))])
                if sent_emails >= threshold:
                    email_employees |= employee
            email_employees.write({'email_sent': True})
            employees = employees - email_employees

        # Record that a computation happened today (drives the daily reset).
        company.sudo().hr_presence_last_compute_date = Datetime.now()

        # Snapshot the computed state into the stored reporting field.
        for employee in all_employees:
            employee.hr_presence_state_display = employee.hr_presence_state

    @api.model
    def _action_open_presence_view(self):
        """Recompute presence, then open the presence kanban for follow-up."""
        # Compute the presence/absence for the employees on the same
        # company than the HR/manager. Then opens the kanban view
        # of the employees with an undefined presence/absence
        _logger.info("Employees presence checked by: %s" % self.env.user.name)
        self._check_presence()
        return {
            "type": "ir.actions.act_window",
            "res_model": "hr.employee",
            "views": [[self.env.ref('hr_presence.hr_employee_view_kanban').id, "kanban"], [False, "tree"], [False, "form"]],
            'view_mode': 'kanban,tree,form',
            "domain": [],
            "name": _("Employee's Presence to Define"),
            "search_view_id": [self.env.ref('hr_presence.hr_employee_view_presence_search').id, 'search'],
            "context": {'search_default_group_hr_presence_state': 1,
                        'searchpanel_default_hr_presence_state_display': 'to_define'},
        }

    def _action_set_manual_presence(self, state):
        """Set the manual presence flag; restricted to HR managers.

        :param state: bool, True to mark present, False to clear
        :raises UserError: if the current user is not an HR manager
        """
        if not self.env.user.has_group('hr.group_hr_manager'):
            raise UserError(_("You don't have the right to do this. Please contact an Administrator."))
        self.write({'manually_set_present': state})

    def action_set_present(self):
        # Manager shortcut: manually flag the employee(s) as present.
        self._action_set_manual_presence(True)

    def action_set_absent(self):
        # Manager shortcut: clear the manual presence flag.
        self._action_set_manual_presence(False)

    def write(self, vals):
        # Dragging a card to 'present' in the kanban view is treated as a
        # manual presence confirmation.
        if vals.get('hr_presence_state_display') == 'present':
            vals['manually_set_present'] = True
        return super().write(vals)

    def action_open_leave_request(self):
        """Open a new leave request form pre-filled for this employee."""
        self.ensure_one()
        return {
            "type": "ir.actions.act_window",
            "res_model": "hr.leave",
            "views": [[False, "form"]],
            "view_mode": 'form',
            "context": {'default_employee_id': self.id},
        }

    # --------------------------------------------------
    # Messaging
    # --------------------------------------------------

    def action_send_sms(self):
        """Open the SMS composer targeting the employee's work mobile.

        :raises UserError: if the user is not an HR manager or the employee
            has no professional mobile number
        """
        self.ensure_one()
        if not self.env.user.has_group('hr.group_hr_manager'):
            raise UserError(_("You don't have the right to do this. Please contact an Administrator."))
        if not self.mobile_phone:
            raise UserError(_("There is no professional mobile for this employee."))
        context = dict(self.env.context)
        context.update(default_res_model='hr.employee', default_res_id=self.id, default_composition_mode='comment', default_number_field_name='mobile_phone')

        # Fall back to a hard-coded body if the SMS template was deleted.
        template = self.env.ref('hr_presence.sms_template_presence', False)
        if not template:
            context['default_body'] = _("""Exception made if there was a mistake of ours, it seems that you are not at your office and there is not request of time off from you.
                Please, take appropriate measures in order to carry out this work absence.
                Do not hesitate to contact your manager or the human resource department.""")
        else:
            context['default_template_id'] = template.id

        return {
            "type": "ir.actions.act_window",
            "res_model": "sms.composer",
            "view_mode": 'form',
            "context": context,
            "name": "Send SMS Text Message",
            "target": "new",
        }

    def action_send_mail(self):
        """Open the mail composer pre-loaded with the presence template.

        :raises UserError: if the user is not an HR manager or the employee
            has no professional email address
        """
        self.ensure_one()
        if not self.env.user.has_group('hr.group_hr_manager'):
            raise UserError(_("You don't have the right to do this. Please contact an Administrator."))
        if not self.work_email:
            raise UserError(_("There is no professional email address for this employee."))
        template = self.env.ref('hr_presence.mail_template_presence', False)
        compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)
        ctx = dict(
            default_model="hr.employee",
            default_res_id=self.id,
            default_use_template=bool(template),
            default_template_id=template.id,
            default_composition_mode='comment',
            default_is_log=True,
            custom_layout='mail.mail_notification_light',
        )
        return {
            'name': _('Compose Email'),
            'type': 'ir.actions.act_window',
            'view_mode': 'form',
            'res_model': 'mail.compose.message',
            'views': [(compose_form.id, 'form')],
            'view_id': compose_form.id,
            'target': 'new',
            'context': ctx,
        }
| 43.985 | 8,797 |
215 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
from odoo import api, fields, models
class ResUsersLog(models.Model):
    """Extend user connection logs for presence control."""
    _inherit = 'res.users.log'

    # Redeclared as an indexed Integer: `_check_presence` filters logs by
    # user id daily, so the index keeps that search fast.
    create_uid = fields.Integer(index=True)
    # IP address recorded at login, matched against the company whitelist.
    ip = fields.Char(string="IP Address")
| 21.5 | 215 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.