size (int64, 0-304k) | ext (stringclasses, 1 value) | lang (stringclasses, 1 value) | branch (stringclasses, 1 value) | content (stringlengths, 0-304k) | avg_line_length (float64, 0-238) | max_line_length (int64, 0-304k) |
---|---|---|---|---|---|---|
713 | py | PYTHON | 15.0 |
# Copyright 2019 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Web Widget Child Selector",
"summary": "Widget used for navigation on hierarchy fields",
"version": "15.0.1.0.0",
"license": "AGPL-3",
"author": "Creu Blanca,Odoo Community Association (OCA)",
"website": "https://github.com/OCA/web",
"depends": ["web"],
"assets": {
"web.assets_backend": [
"web_widget_child_selector/static/src/js/**/*",
"web_widget_child_selector/static/src/scss/**/*",
],
"web.assets_qweb": ["web_widget_child_selector/static/src/xml/**/*"],
},
"qweb": ["static/src/xml/widget_child_selector.xml"],
}
| 35.65 | 713 |
1,195 | py | PYTHON | 15.0 |
# Copyright 2019 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests.common import TransactionCase
class TestWidgetChildSelector(TransactionCase):
def setUp(self):
super().setUp()
ResPartner = self.env["res.partner"].with_context(tracking_disable=True)
self.partner_1 = ResPartner.create({"name": "P1"})
self.partner_2 = ResPartner.create(
{"name": "P2", "parent_id": self.partner_1.id}
)
self.partner_3 = ResPartner.create(
{"name": "P3", "parent_id": self.partner_2.id}
)
# Model that doesn't have the parent/child structure
self.group = self.env["res.groups"].create({"name": "Group"})
def test_widget_child_selector(self):
res = self.partner_2.get_record_direct_childs_parents(
{"child_selection_field": "name"}
)
self.assertIn((self.partner_1.id, self.partner_1.name), res["parents"])
self.assertIn((self.partner_3.id, self.partner_3.name), res["childs"])
res = self.group.get_record_direct_childs_parents({})
self.assertFalse(res["parents"])
self.assertFalse(res["childs"])
| 41.206897 | 1,195 |
1,066 | py | PYTHON | 15.0 |
# Copyright 2019 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models
class Base(models.AbstractModel):
_inherit = "base"
def _get_record_parents(self, field):
if not self or not hasattr(self, self._parent_name):
return []
return getattr(self, self._parent_name)._get_record_parents(field) + [
(self.id, str(getattr(self, field)))
]
def _get_record_direct_childs(self, field, domain):
if not hasattr(self, self._parent_name):
return []
return [
(r.id, str(getattr(r, field)))
for r in self.search([(self._parent_name, "=", self.id or False)] + domain)
]
def get_record_direct_childs_parents(self, options, domain=False):
if not domain:
domain = []
field = options.get("child_selection_field", "display_name")
return {
"childs": self._get_record_direct_childs(field, domain),
"parents": self._get_record_parents(field),
}
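# Illustrative sketch (not part of the original file), based on the test module above:
#     record.get_record_direct_childs_parents({"child_selection_field": "name"})
# returns a dict of the form {"childs": [(id, name), ...], "parents": [(id, name), ...]}.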
| 33.3125 | 1,066 |
703 | py | PYTHON | 15.0 |
{
"name": "Web Refresher",
"version": "15.0.2.0.0",
"author": "Compassion Switzerland, Tecnativa, Odoo Community Association (OCA)",
"license": "AGPL-3",
"website": "https://github.com/OCA/web",
"depends": ["web"],
"installable": True,
"auto_install": False,
"assets": {
"web.assets_backend": [
"web_refresher/static/src/scss/refresher.scss",
"web_refresher/static/src/js/refresher.esm.js",
"web_refresher/static/src/js/control_panel.esm.js",
],
"web.assets_qweb": [
"web_refresher/static/src/xml/refresher.xml",
"web_refresher/static/src/xml/control_panel.xml",
],
},
}
| 33.47619 | 703 |
899 | py | PYTHON | 15.0 |
# Copyright 2019-2020 Brainbean Apps (https://brainbeanapps.com)
# Copyright 2020 CorporateHub (https://corporatehub.eu)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
{
"name": "Dynamic Dropdown Widget",
"summary": "This module adds support for dynamic dropdown widget",
"category": "Web",
"version": "15.0.1.0.0",
"license": "AGPL-3",
"author": "CorporateHub, Odoo Community Association (OCA)",
"website": "https://github.com/OCA/web",
"depends": ["web"],
"installable": True,
"assets": {
"web.assets_backend": [
"web_widget_dropdown_dynamic/static/src/js/basic_model.js",
"web_widget_dropdown_dynamic/static/src/js/field_dynamic_dropdown.js",
],
"web.qunit_suite_tests": [
"web_widget_dropdown_dynamic/static/tests/web_widget_dropdown_dynamic_tests.js"
],
},
}
| 39.086957 | 899 |
688 | py | PYTHON | 15.0 |
# pylint: disable=missing-docstring
# Copyright 2016 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Web Notify",
"summary": """
Send notification messages to user""",
"version": "15.0.1.0.0",
"license": "AGPL-3",
"author": "ACSONE SA/NV," "AdaptiveCity," "Odoo Community Association (OCA)",
"development_status": "Production/Stable",
"website": "https://github.com/OCA/web",
"depends": ["web", "bus", "base", "mail"],
"assets": {
"web.assets_backend": [
"web_notify/static/src/js/services/*.js",
]
},
"demo": ["views/res_users_demo.xml"],
"installable": True,
}
| 31.272727 | 688 |
3,807 | py | PYTHON | 15.0 |
# Copyright 2016 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import json
from odoo import exceptions
from odoo.tests import common
from ..models.res_users import DANGER, DEFAULT, INFO, SUCCESS, WARNING
class TestResUsers(common.TransactionCase):
def test_notify_success(self):
bus_bus = self.env["bus.bus"]
domain = [("channel", "=", self.env.user.notify_success_channel_name)]
existing = bus_bus.search(domain)
test_msg = {"message": "message", "title": "title", "sticky": True}
self.env.user.notify_success(**test_msg)
news = bus_bus.search(domain) - existing
self.assertEqual(1, len(news))
test_msg.update({"type": SUCCESS})
payload = json.loads(news.message)["payload"][0]
self.assertDictEqual(test_msg, payload)
def test_notify_danger(self):
bus_bus = self.env["bus.bus"]
domain = [("channel", "=", self.env.user.notify_danger_channel_name)]
existing = bus_bus.search(domain)
test_msg = {"message": "message", "title": "title", "sticky": True}
self.env.user.notify_danger(**test_msg)
news = bus_bus.search(domain) - existing
self.assertEqual(1, len(news))
test_msg.update({"type": DANGER})
payload = json.loads(news.message)["payload"][0]
self.assertDictEqual(test_msg, payload)
def test_notify_warning(self):
bus_bus = self.env["bus.bus"]
domain = [("channel", "=", self.env.user.notify_warning_channel_name)]
existing = bus_bus.search(domain)
test_msg = {"message": "message", "title": "title", "sticky": True}
self.env.user.notify_warning(**test_msg)
news = bus_bus.search(domain) - existing
self.assertEqual(1, len(news))
test_msg.update({"type": WARNING})
payload = json.loads(news.message)["payload"][0]
self.assertDictEqual(test_msg, payload)
def test_notify_info(self):
bus_bus = self.env["bus.bus"]
domain = [("channel", "=", self.env.user.notify_info_channel_name)]
existing = bus_bus.search(domain)
test_msg = {"message": "message", "title": "title", "sticky": True}
self.env.user.notify_info(**test_msg)
news = bus_bus.search(domain) - existing
self.assertEqual(1, len(news))
test_msg.update({"type": INFO})
payload = json.loads(news.message)["payload"][0]
self.assertDictEqual(test_msg, payload)
def test_notify_default(self):
bus_bus = self.env["bus.bus"]
domain = [("channel", "=", self.env.user.notify_default_channel_name)]
existing = bus_bus.search(domain)
test_msg = {"message": "message", "title": "title", "sticky": True}
self.env.user.notify_default(**test_msg)
news = bus_bus.search(domain) - existing
self.assertEqual(1, len(news))
test_msg.update({"type": DEFAULT})
payload = json.loads(news.message)["payload"][0]
self.assertDictEqual(test_msg, payload)
def test_notify_many(self):
# check that the notification of a list of users is done with
# a single call to the bus
users = self.env.user.search([(1, "=", 1)])
self.assertTrue(len(users) > 1)
self.env.user.notify_warning(message="message", target=users.partner_id)
def test_notify_other_user(self):
other_user = self.env.ref("base.user_demo")
other_user_model = self.env["res.users"].with_user(other_user)
with self.assertRaises(exceptions.UserError):
other_user_model.browse(self.env.uid).notify_info(message="hello")
def test_notify_admin_allowed_other_user(self):
other_user = self.env.ref("base.user_demo")
other_user.notify_info(message="hello")
| 42.775281 | 3,807 |
3,492 | py | PYTHON | 15.0 |
# Copyright 2016 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, api, exceptions, fields, models
from odoo.addons.bus.models.bus import channel_with_db, json_dump
DEFAULT_MESSAGE = "Default message"
SUCCESS = "success"
DANGER = "danger"
WARNING = "warning"
INFO = "info"
DEFAULT = "default"
class ResUsers(models.Model):
_inherit = "res.users"
@api.depends("create_date")
def _compute_channel_names(self):
for record in self:
record.notify_success_channel_name = json_dump(
channel_with_db(self.env.cr.dbname, record.partner_id)
)
record.notify_danger_channel_name = json_dump(
channel_with_db(self.env.cr.dbname, record.partner_id)
)
record.notify_warning_channel_name = json_dump(
channel_with_db(self.env.cr.dbname, record.partner_id)
)
record.notify_info_channel_name = json_dump(
channel_with_db(self.env.cr.dbname, record.partner_id)
)
record.notify_default_channel_name = json_dump(
channel_with_db(self.env.cr.dbname, record.partner_id)
)
notify_success_channel_name = fields.Char(compute="_compute_channel_names")
notify_danger_channel_name = fields.Char(compute="_compute_channel_names")
notify_warning_channel_name = fields.Char(compute="_compute_channel_names")
notify_info_channel_name = fields.Char(compute="_compute_channel_names")
notify_default_channel_name = fields.Char(compute="_compute_channel_names")
def notify_success(
self, message="Default message", title=None, sticky=False, target=None
):
title = title or _("Success")
self._notify_channel(SUCCESS, message, title, sticky, target)
def notify_danger(
self, message="Default message", title=None, sticky=False, target=None
):
title = title or _("Danger")
self._notify_channel(DANGER, message, title, sticky, target)
def notify_warning(
self, message="Default message", title=None, sticky=False, target=None
):
title = title or _("Warning")
self._notify_channel(WARNING, message, title, sticky, target)
def notify_info(
self, message="Default message", title=None, sticky=False, target=None
):
title = title or _("Information")
self._notify_channel(INFO, message, title, sticky, target)
def notify_default(
self, message="Default message", title=None, sticky=False, target=None
):
title = title or _("Default")
self._notify_channel(DEFAULT, message, title, sticky, target)
def _notify_channel(
self,
type_message=DEFAULT,
message=DEFAULT_MESSAGE,
title=None,
sticky=False,
target=None,
):
if not self.env.user._is_admin() and any(
user.id != self.env.uid for user in self
):
raise exceptions.UserError(
_("Sending a notification to another user is forbidden.")
)
if not target:
target = self.env.user.partner_id
bus_message = {
"type": type_message,
"message": message,
"title": title,
"sticky": sticky,
}
notifications = [[partner, "web.notify", [bus_message]] for partner in target]
self.env["bus.bus"]._sendmany(notifications)
| 35.632653 | 3,492 |
909 | py | PYTHON | 15.0 |
# Copyright 2020 Tecnativa - Alexandre Díaz
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
{
"name": "Web Pivot Computed Measure",
"category": "web",
"version": "15.0.1.0.1",
"author": "Tecnativa, Odoo Community Association (OCA)",
"license": "AGPL-3",
"website": "https://github.com/OCA/web",
"depends": ["web"],
"auto_install": False,
"installable": True,
"assets": {
"web.assets_qweb": [
"/web_pivot_computed_measure/static/src/**/*.xml",
],
"web.assets_backend": [
"/web_pivot_computed_measure/static/src/**/*.esm.js",
"/web_pivot_computed_measure/static/src/**/*.scss",
("remove", "/web_pivot_computed_measure/static/src/test/*.esm.js"),
],
"web.assets_tests": [
"/web_pivot_computed_measure/static/src/test/test.esm.js",
],
},
}
| 34.923077 | 908 |
282 | py | PYTHON | 15.0 |
# Copyright 2022 Tecnativa - Carlos Roca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
from odoo import fields, models
class ResUsersFake(models.Model):
_inherit = "res.users"
user_year_born = fields.Integer()
user_year_now = fields.Integer()
| 23.5 | 282 |
1,108 | py | PYTHON | 15.0 |
# Copyright 2022 Tecnativa - Carlos Roca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
from odoo_test_helper import FakeModelLoader
from odoo.tests import common, tagged
@tagged("post_install", "-at_install")
class TestUIPivot(common.HttpCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.loader = FakeModelLoader(cls.env, cls.__module__)
cls.loader.backup_registry()
from .res_users_fake import ResUsersFake
cls.loader.update_registry((ResUsersFake,))
cls.env["res.users"].create(
{
"name": "User 1",
"login": "us_1",
# Fake fields
"user_year_born": 1998,
"user_year_now": 2022,
}
)
# Set pivot view to company action
action = cls.env.ref("base.action_res_users")
action.view_mode += ",pivot"
def test_ui(self):
self.start_tour(
"/web",
"web_pivot_computed_measure_tour",
login="admin",
step_delay=100,
)
| 29.945946 | 1,108 |
1,195 | py | PYTHON | 15.0 |
# Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Web timeline",
"summary": "Interactive visualization chart to show events in time",
"version": "15.0.1.0.2",
"development_status": "Production/Stable",
"author": "ACSONE SA/NV, "
"Tecnativa, "
"Monk Software, "
"Onestein, "
"Trobz, "
"Odoo Community Association (OCA)",
"category": "web",
"license": "AGPL-3",
"website": "https://github.com/OCA/web",
"depends": ["web"],
"data": [],
"maintainers": ["tarteo"],
"application": False,
"installable": True,
"assets": {
"web.assets_backend": [
"web_timeline/static/src/scss/web_timeline.scss",
"web_timeline/static/src/js/timeline_view.js",
"web_timeline/static/src/js/timeline_renderer.js",
"web_timeline/static/src/js/timeline_controller.js",
"web_timeline/static/src/js/timeline_model.js",
"web_timeline/static/src/js/timeline_canvas.js",
],
"web.assets_qweb": [
"web_timeline/static/src/xml/web_timeline.xml",
],
},
}
| 33.194444 | 1,195 |
315 | py | PYTHON | 15.0 |
# Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import fields, models
TIMELINE_VIEW = ("timeline", "Timeline")
class IrUIView(models.Model):
_inherit = "ir.ui.view"
type = fields.Selection(selection_add=[TIMELINE_VIEW])
| 26.25 | 315 |
662 | py | PYTHON | 15.0 |
# Copyright 2015 Andrius Preimantas <[email protected]>
# Copyright 2020 ACSONE SA/NV
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
{
"name": "Use AND conditions on omnibar search",
"version": "15.0.1.0.0",
"author": "Versada UAB, ACSONE SA/NV, Serincloud, Odoo Community Association (OCA)",
"license": "AGPL-3",
"category": "web",
"website": "https://github.com/OCA/web",
"depends": ["web"],
"assets": {
"web.assets_backend": [
"/web_search_with_and/static/src/js/control_panel_model_extension.js",
"/web_search_with_and/static/src/js/search_bar.js",
],
},
}
| 34.842105 | 662 |
791 | py | PYTHON | 15.0 |
# Copyright 2017 - 2018 Modoolar <[email protected]>
# Copyright 2018 Brainbean Apps
# Copyright 2020 Manuel Calero
# Copyright 2020 CorporateHub (https://corporatehub.eu)
# License LGPLv3.0 or later (https://www.gnu.org/licenses/lgpl-3.0.en.html).
{
"name": "Web Actions Multi",
"summary": "Enables triggering of more than one action on ActionManager",
"category": "Web",
"version": "15.0.1.0.0",
"license": "LGPL-3",
"author": "Modoolar, " "CorporateHub, " "Odoo Community Association (OCA)",
"website": "https://github.com/OCA/web",
"depends": ["web"],
"data": ["security/ir.model.access.csv"],
"assets": {
"web.assets_backend": [
"web_ir_actions_act_multi/static/src/**/*.esm.js",
],
},
"installable": True,
}
| 34.391304 | 791 |
433 | py | PYTHON | 15.0 |
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class IrActionsActMulti(models.Model):
_name = "ir.actions.act_multi"
_description = "Action Mulit"
_inherit = "ir.actions.actions"
_table = "ir_actions"
type = fields.Char(default="ir.actions.act_multi")
def _get_readable_fields(self):
return super()._get_readable_fields() | {"actions"}
| 28.866667 | 433 |
823 | py | PYTHON | 15.0 |
# Copyright 2019 GRAP - Quentin DUPONT
# Copyright 2020 Tecnativa - Alexandre Díaz
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
{
"name": "Web Widget Numeric Step",
"category": "web",
"version": "15.0.1.0.0",
"author": "GRAP, Tecnativa, " "Odoo Community Association (OCA)",
"license": "AGPL-3",
"website": "https://github.com/OCA/web",
"depends": ["web"],
"demo": ["demo/res_users_view.xml"],
"assets": {
"web.assets_backend": [
"web_widget_numeric_step/static/src/js/numeric_step.js",
"web_widget_numeric_step/static/src/css/numeric_step.scss",
],
"web.assets_qweb": [
"web_widget_numeric_step/static/src/xml/numeric_step.xml",
],
},
"auto_install": False,
"installable": True,
}
| 32.88 | 822 |
1,166 | py | PYTHON | 15.0 |
# Copyright 2015 ACSONE SA/NV
# Copyright 2018 Amaris
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Web Dialog Size",
"summary": """
A module that lets the user expand a
dialog box to the full screen width.""",
"author": "ACSONE SA/NV, "
"Therp BV, "
"Siddharth Bhalgami,"
"Tecnativa, "
"Amaris, "
"Odoo Community Association (OCA)",
"website": "https://github.com/OCA/web",
"category": "web",
"version": "15.0.1.0.1",
"license": "AGPL-3",
"depends": ["web"],
"installable": True,
"assets": {
"web.assets_backend": [
"/web_dialog_size/static/src/js/web_dialog_size.js",
"/web_dialog_size/static/src/js/web_dialog_size.esm.js",
"/web_dialog_size/static/src/js/web_dialog_draggable.esm.js",
"/web_dialog_size/static/src/scss/web_dialog_size.scss",
],
"web.assets_qweb": [
"/web_dialog_size/static/src/xml/web_dialog_size.xml",
"/web_dialog_size/static/src/xml/ExpandButton.xml",
"/web_dialog_size/static/src/xml/DialogDraggable.xml",
],
},
}
| 33.314286 | 1,166 |
729 | py | PYTHON | 15.0 |
# Copyright 2018 Tecnativa - Ernesto Tejeda
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0
from odoo.tests.common import TransactionCase
class TestWebDialogSize(TransactionCase):
def setUp(self):
super(TestWebDialogSize, self).setUp()
def test_get_web_dialog_size_config(self):
obj = self.env["ir.config_parameter"]
self.assertFalse(obj.get_web_dialog_size_config()["default_maximize"])
obj.set_param("web_dialog_size.default_maximize", "True")
self.assertTrue(obj.get_web_dialog_size_config()["default_maximize"])
obj.set_param("web_dialog_size.default_maximize", "False")
self.assertFalse(obj.get_web_dialog_size_config()["default_maximize"])
| 36.45 | 729 |
514 | py | PYTHON | 15.0 |
# Copyright 2018 Tecnativa - Jairo Llopis
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo.models import Model, api
from odoo.tools.safe_eval import const_eval
class IrConfigParameter(Model):
_inherit = "ir.config_parameter"
@api.model
def get_web_dialog_size_config(self):
get_param = self.sudo().get_param
return {
"default_maximize": const_eval(
get_param("web_dialog_size.default_maximize", "False")
)
}
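# Illustrative sketch (not part of the original file), based on the tests above:
#     self.env["ir.config_parameter"].get_web_dialog_size_config()
# returns {"default_maximize": False} unless the "web_dialog_size.default_maximize"
# parameter is set to "True".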
| 28.555556 | 514 |
753 | py | PYTHON | 15.0 |
# Copyright 2015 0k.io
# Copyright 2016 ACSONE SA/NV
# Copyright 2017 Tecnativa
# Copyright 2020 initOS GmbH.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "web_m2x_options",
"version": "15.0.1.1.0",
"category": "Web",
"author": "initOS GmbH,"
"ACSONE SA/NV, "
"0k.io, "
"Tecnativa, "
"Odoo Community Association (OCA)",
"website": "https://github.com/OCA/web",
"license": "AGPL-3",
"depends": ["web"],
"assets": {
"web.assets_backend": [
"web_m2x_options/static/src/js/form.js",
"web_m2x_options/static/src/js/ir_options.js",
],
"web.assets_qweb": ["web_m2x_options/static/src/xml/base.xml"],
},
"installable": True,
}
| 27.888889 | 753 |
1,475 | py | PYTHON | 15.0 |
# Copyright 2020 initOS GmbH.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests import common
class TestIrConfigParameter(common.TransactionCase):
@classmethod
def setUpClass(cls):
super(TestIrConfigParameter, cls).setUpClass()
cls.env["ir.config_parameter"].set_param("web_m2x_options.limit", 10)
cls.env["ir.config_parameter"].set_param("web_m2x_options.create_edit", "True")
cls.env["ir.config_parameter"].set_param("web_m2x_options.create", "True")
cls.env["ir.config_parameter"].set_param("web_m2x_options.search_more", "False")
cls.env["ir.config_parameter"].set_param("web_m2x_options.m2o_dialog", "True")
def test_web_m2x_options_key(self):
web_m2x_options = self.env["ir.config_parameter"].get_web_m2x_options()
self.assertIn("web_m2x_options.limit", web_m2x_options)
self.assertNotIn("web_m2x_options.m2o_dialog_test", web_m2x_options)
def test_web_m2x_options_value(self):
web_m2x_options = self.env["ir.config_parameter"].get_web_m2x_options()
self.assertEqual(web_m2x_options["web_m2x_options.limit"], "10")
self.assertTrue(bool(web_m2x_options["web_m2x_options.create_edit"]))
self.assertTrue(bool(web_m2x_options["web_m2x_options.create"]))
self.assertEqual(web_m2x_options["web_m2x_options.search_more"], "False")
self.assertTrue(bool(web_m2x_options["web_m2x_options.m2o_dialog"]))
| 52.678571 | 1,475 |
581 | py | PYTHON | 15.0 |
from odoo import api, models
class IrConfigParameter(models.Model):
_inherit = "ir.config_parameter"
@api.model
def get_web_m2x_options(self):
opts = [
"web_m2x_options.create",
"web_m2x_options.create_edit",
"web_m2x_options.limit",
"web_m2x_options.search_more",
"web_m2x_options.m2o_dialog",
"web_m2x_options.field_limit_entries",
]
values = self.sudo().search_read([["key", "in", opts]], ["key", "value"])
return {res["key"]: res["value"] for res in values}
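# Illustrative sketch (not part of the original file), based on the tests above: the
# options are returned as stored strings, e.g.
#     {"web_m2x_options.limit": "10", "web_m2x_options.search_more": "False", ...}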
| 32.277778 | 581 |
656 | py | PYTHON | 15.0 |
# © 2017 Onestein (<http://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "List Range Selection",
"summary": """
Enables selecting a range of records using the shift key
""",
"version": "15.0.1.0.0",
"category": "Web",
"author": "Onestein, Odoo Community Association (OCA)",
"website": "https://github.com/OCA/web",
"license": "AGPL-3",
"depends": ["web"],
"installable": True,
"application": False,
"assets": {
"web.assets_backend": [
"/web_listview_range_select/static/src/js/web_listview_range_select.js"
],
},
}
| 29.772727 | 655 |
783 | py | PYTHON | 15.0 |
# Copyright 2017 ForgeFlow S.L.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
{
"name": "Web Widget Bokeh Chart",
"category": "Hidden",
"summary": "This widget allows to display charts using Bokeh library.",
"author": "ForgeFlow, " "Odoo Community Association (OCA)",
"version": "15.0.1.1.1",
"maintainers": ["LoisRForgeFlow", "ChrisOForgeFlow"],
"development_status": "Production/Stable",
"website": "https://github.com/OCA/web",
"depends": ["web"],
"data": [],
"external_dependencies": {"python": ["bokeh==2.4.2"]},
"auto_install": False,
"license": "LGPL-3",
"assets": {
"web.assets_backend": [
"web_widget_bokeh_chart/static/src/js/web_widget_bokeh_chart.esm.js",
],
},
}
| 34.043478 | 783 |
692 | py | PYTHON | 15.0 |
# Copyright 2015-2018 Camptocamp SA, Damien Crier
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Colorize field in tree views",
"summary": "Allows you to dynamically color fields on tree views",
"category": "Hidden/Dependency",
"version": "15.0.1.0.0",
"depends": ["web"],
"author": "Camptocamp, Therp BV, Odoo Community Association (OCA)",
"license": "AGPL-3",
"website": "https://github.com/OCA/web",
"demo": ["demo/res_users.xml"],
"installable": True,
"assets": {
"web.assets_backend": [
"/web_tree_dynamic_colored_field/static/src/js/web_tree_dynamic_colored_field.js",
],
},
}
| 36.421053 | 692 |
559 | py | PYTHON | 15.0 |
# Copyright 2020 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Web View Calendar List",
"summary": """
Show calendars as a List""",
"version": "15.0.1.0.0",
"license": "AGPL-3",
"author": "Creu Blanca,Odoo Community Association (OCA)",
"website": "https://github.com/OCA/web",
"depends": ["web"],
"assets": {
"web.assets_backend": [
"web_view_calendar_list/static/src/js/**/*",
"web_view_calendar_list/static/src/scss/**/*",
],
},
}
| 29.421053 | 559 |
329 | py | PYTHON | 15.0 |
# Copyright 2020 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class IrUiView(models.Model):
_inherit = "ir.ui.view"
type = fields.Selection(
selection_add=[("calendar_list", "Calendar List")],
ondelete={"calendar_list": "cascade"},
)
| 25.307692 | 329 |
364 | py | PYTHON | 15.0 |
# Copyright 2020 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class IrActionsActWindowView(models.Model):
_inherit = "ir.actions.act_window.view"
view_mode = fields.Selection(
selection_add=[("calendar_list", "Calendar List")],
ondelete={"calendar_list": "cascade"},
)
| 28 | 364 |
654 | py | PYTHON | 15.0 |
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
{
"name": "Web Time Range Menu Custom",
"version": "15.0.1.0.1",
"author": "Tecnativa, Odoo Community Association (OCA)",
"license": "AGPL-3",
"website": "https://github.com/OCA/web",
"depends": ["web"],
"installable": True,
"auto_install": False,
"assets": {
"web.assets_backend": [
"/web_time_range_menu_custom/static/src/js/*.esm.js",
"/web_time_range_menu_custom/static/src/scss/*.scss",
],
"web.assets_qweb": [
"/web_time_range_menu_custom/static/src/xml/*.xml",
],
},
}
| 31.142857 | 654 |
623 | py | PYTHON | 15.0 |
# Copyright 2018 Simone Orsi - Camptocamp SA
# License LGPLv3.0 or later (https://www.gnu.org/licenses/lgpl-3.0.en.html).
{
"name": "Web URL widget advanced",
"summary": "This module extends URL widget "
"for displaying anchors with custom labels.",
"category": "Web",
"version": "15.0.1.0.0",
"license": "LGPL-3",
"author": "Camptocamp, Odoo Community Association (OCA)",
"website": "https://github.com/OCA/web",
"depends": ["web"],
"data": [],
"assets": {
"web.assets_backend": ["web_widget_url_advanced/static/src/js/url_widget.js"],
},
"installable": True,
}
| 34.611111 | 623 |
559 | py | PYTHON | 15.0 |
# Copyright 2019-2020 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Widget Open on new Tab",
"summary": """
Allow opening records in a new tab from tree views""",
"version": "15.0.1.0.0",
"license": "AGPL-3",
"author": "Creu Blanca,Odoo Community Association (OCA)",
"website": "https://github.com/OCA/web",
"depends": ["web"],
"demo": ["demo/res_users_view.xml"],
"assets": {
"web.assets_backend": ["web_widget_open_tab/static/src/js/widget.js"],
},
}
| 32.882353 | 559 |
598 | py | PYTHON | 15.0 |
{
"name": "Group Expand Buttons",
"category": "Web",
"version": "15.0.1.0.0",
"license": "AGPL-3",
"author": "OpenERP SA, "
"AvanzOSC, "
"Serv. Tecnol. Avanzados - Pedro M. Baeza, "
"Therp BV, "
"Xtendoo, "
"Odoo Community Association (OCA)",
"website": "https://github.com/OCA/web",
"depends": ["web"],
"assets": {
"web.assets_backend": [
"/web_group_expand/static/src/js/web_group_expand.esm.js",
],
"web.assets_qweb": [
"/web_group_expand/static/src/xml/expand_buttons.xml",
],
},
}
| 27.181818 | 598 |
1,664 | py | PYTHON | 15.0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
from os.path import join, dirname
exec(open(join(dirname(__file__), 'odoo', 'release.py'), 'rb').read()) # Load release variables
lib_name = 'odoo'
setup(
name='odoo',
version=version,
description=description,
long_description=long_desc,
url=url,
author=author,
author_email=author_email,
classifiers=[c for c in classifiers.split('\n') if c],
license=license,
scripts=['setup/odoo'],
packages=find_packages(),
package_dir={'%s' % lib_name: 'odoo'},
include_package_data=True,
install_requires=[
'babel >= 1.0',
'decorator',
'docutils',
'gevent',
'idna',
'Jinja2',
'lxml', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/
'libsass',
'mock',
'ofxparse',
'passlib',
'pillow', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/
'polib',
'psutil', # windows binary code.google.com/p/psutil/downloads/list
'psycopg2 >= 2.2',
'pydot',
'pyopenssl',
'pypdf2',
'pyserial',
'python-dateutil',
'python-stdnum',
'pytz',
'pyusb >= 1.0.0b1',
'qrcode',
'reportlab', # windows binary pypi.python.org/pypi/reportlab
'requests',
'zeep',
'vobject',
'werkzeug',
'xlsxwriter',
'xlwt',
],
python_requires='>=3.7',
extras_require={
'ldap': ['python-ldap'],
'SSL': ['pyopenssl'],
},
tests_require=[
'freezegun',
],
)
| 25.212121 | 1,664 |
1,669 | py | PYTHON | 15.0 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
RELEASE_LEVELS = [ALPHA, BETA, RELEASE_CANDIDATE, FINAL] = ['alpha', 'beta', 'candidate', 'final']
RELEASE_LEVELS_DISPLAY = {ALPHA: ALPHA,
BETA: BETA,
RELEASE_CANDIDATE: 'rc',
FINAL: ''}
# version_info format: (MAJOR, MINOR, MICRO, RELEASE_LEVEL, SERIAL)
# inspired by Python's own sys.version_info, in order to be
# properly comparable using normal operators, for example:
# (6,1,0,'beta',0) < (6,1,0,'candidate',1) < (6,1,0,'candidate',2)
# (6,1,0,'candidate',2) < (6,1,0,'final',0) < (6,1,2,'final',0)
version_info = (15, 0, 0, FINAL, 0, '')
version = '.'.join(str(s) for s in version_info[:2]) + RELEASE_LEVELS_DISPLAY[version_info[3]] + str(version_info[4] or '') + version_info[5]
series = serie = major_version = '.'.join(str(s) for s in version_info[:2])
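# Worked example (added for clarity): with version_info = (15, 0, 0, FINAL, 0, ''),
# the expressions above give version = '15.0' and series = '15.0', since the FINAL
# release level displays as an empty suffix.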
product_name = 'Odoo'
description = 'Odoo Server'
long_desc = '''Odoo is a complete ERP and CRM. The main features are accounting (analytic
and financial), stock management, sales and purchases management, tasks
automation, marketing campaigns, help desk, POS, etc. Technical features include
a distributed server, an object database, a dynamic GUI,
customizable reports, and XML-RPC interfaces.
'''
classifiers = """Development Status :: 5 - Production/Stable
License :: OSI Approved :: GNU Lesser General Public License v3
Programming Language :: Python
"""
url = 'https://www.odoo.com'
author = 'OpenERP S.A.'
author_email = '[email protected]'
license = 'LGPL-3'
nt_service_name = "odoo-server-" + series.replace('~','-')
| 45.108108 | 1,669 |
11,352 | py | PYTHON | 15.0 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import logging.handlers
import os
import platform
import pprint
import sys
import threading
import time
import traceback
import warnings
from . import release
from . import sql_db
from . import tools
_logger = logging.getLogger(__name__)
def log(logger, level, prefix, msg, depth=None):
indent=''
indent_after=' '*len(prefix)
for line in (prefix + pprint.pformat(msg, depth=depth)).split('\n'):
logger.log(level, indent+line)
indent=indent_after
class PostgreSQLHandler(logging.Handler):
""" PostgreSQL Logging Handler will store logs in the database, by default
the current database, can be set using --log-db=DBNAME
"""
def emit(self, record):
ct = threading.current_thread()
ct_db = getattr(ct, 'dbname', None)
dbname = tools.config['log_db'] if tools.config['log_db'] and tools.config['log_db'] != '%d' else ct_db
if not dbname:
return
with tools.ignore(Exception), tools.mute_logger('odoo.sql_db'), sql_db.db_connect(dbname, allow_uri=True).cursor() as cr:
# preclude risks of deadlocks
cr.execute("SET LOCAL statement_timeout = 1000")
msg = tools.ustr(record.msg)
if record.args:
msg = msg % record.args
traceback = getattr(record, 'exc_text', '')
if traceback:
msg = "%s\n%s" % (msg, traceback)
# we do not use record.levelname because it may have been changed by ColoredFormatter.
levelname = logging.getLevelName(record.levelno)
val = ('server', ct_db, record.name, levelname, msg, record.pathname, record.lineno, record.funcName)
cr.execute("""
INSERT INTO ir_logging(create_date, type, dbname, name, level, message, path, line, func)
VALUES (NOW() at time zone 'UTC', %s, %s, %s, %s, %s, %s, %s, %s)
""", val)
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, _NOTHING, DEFAULT = range(10)
#The background is set with 40 plus the number of the color, and the foreground with 30
#These are the sequences needed to get colored output
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLOR_PATTERN = "%s%s%%s%s" % (COLOR_SEQ, COLOR_SEQ, RESET_SEQ)
LEVEL_COLOR_MAPPING = {
logging.DEBUG: (BLUE, DEFAULT),
logging.INFO: (GREEN, DEFAULT),
logging.WARNING: (YELLOW, DEFAULT),
logging.ERROR: (RED, DEFAULT),
logging.CRITICAL: (WHITE, RED),
}
class PerfFilter(logging.Filter):
def format_perf(self, query_count, query_time, remaining_time):
return ("%d" % query_count, "%.3f" % query_time, "%.3f" % remaining_time)
def filter(self, record):
if hasattr(threading.current_thread(), "query_count"):
query_count = threading.current_thread().query_count
query_time = threading.current_thread().query_time
perf_t0 = threading.current_thread().perf_t0
remaining_time = time.time() - perf_t0 - query_time
record.perf_info = '%s %s %s' % self.format_perf(query_count, query_time, remaining_time)
delattr(threading.current_thread(), "query_count")
else:
record.perf_info = "- - -"
return True
class ColoredPerfFilter(PerfFilter):
def format_perf(self, query_count, query_time, remaining_time):
def colorize_time(time, format, low=1, high=5):
if time > high:
return COLOR_PATTERN % (30 + RED, 40 + DEFAULT, format % time)
if time > low:
return COLOR_PATTERN % (30 + YELLOW, 40 + DEFAULT, format % time)
return format % time
return (
colorize_time(query_count, "%d", 100, 1000),
colorize_time(query_time, "%.3f", 0.1, 3),
colorize_time(remaining_time, "%.3f", 1, 5)
)
class DBFormatter(logging.Formatter):
def format(self, record):
record.pid = os.getpid()
record.dbname = getattr(threading.current_thread(), 'dbname', '?')
return logging.Formatter.format(self, record)
class ColoredFormatter(DBFormatter):
def format(self, record):
fg_color, bg_color = LEVEL_COLOR_MAPPING.get(record.levelno, (GREEN, DEFAULT))
record.levelname = COLOR_PATTERN % (30 + fg_color, 40 + bg_color, record.levelname)
return DBFormatter.format(self, record)
_logger_init = False
def init_logger():
global _logger_init
if _logger_init:
return
_logger_init = True
old_factory = logging.getLogRecordFactory()
def record_factory(*args, **kwargs):
record = old_factory(*args, **kwargs)
record.perf_info = ""
return record
logging.setLogRecordFactory(record_factory)
# enable deprecation warnings (disabled by default)
warnings.simplefilter('default', category=DeprecationWarning)
# ignore deprecation warnings from invalid escape (there's a ton and it's
# pretty likely a super low-value signal)
warnings.filterwarnings('ignore', r'^invalid escape sequence \'?\\.', category=DeprecationWarning)
# recordsets are both sequence and set so trigger warning despite no issue
warnings.filterwarnings('ignore', r'^Sampling from a set', category=DeprecationWarning, module='odoo')
# ignore a bunch of warnings we can't really fix ourselves
for module in [
'babel.util', # deprecated parser module, no release yet
'zeep.loader',# zeep using defusedxml.lxml
'reportlab.lib.rl_safe_eval',# reportlab importing ABC from collections
'ofxparse',# ofxparse importing ABC from collections
'astroid', # deprecated imp module (fixed in 2.5.1)
'requests_toolbelt', # importing ABC from collections (fixed in 0.9)
]:
warnings.filterwarnings('ignore', category=DeprecationWarning, module=module)
# the SVG guesser thing always compares str and bytes, ignore it
warnings.filterwarnings('ignore', category=BytesWarning, module='odoo.tools.image')
# reportlab does a bunch of bytes/str mixing in a hashmap
warnings.filterwarnings('ignore', category=BytesWarning, module='reportlab.platypus.paraparser')
from .tools.translate import resetlocale
resetlocale()
# create a format for log messages and dates
format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s %(perf_info)s'
# Normal Handler on stderr
handler = logging.StreamHandler()
if tools.config['syslog']:
# SysLog Handler
if os.name == 'nt':
handler = logging.handlers.NTEventLogHandler("%s %s" % (release.description, release.version))
elif platform.system() == 'Darwin':
handler = logging.handlers.SysLogHandler('/var/run/log')
else:
handler = logging.handlers.SysLogHandler('/dev/log')
format = '%s %s' % (release.description, release.version) \
+ ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'
elif tools.config['logfile']:
# LogFile Handler
logf = tools.config['logfile']
try:
# We check we have the right location for the log files
dirname = os.path.dirname(logf)
if dirname and not os.path.isdir(dirname):
os.makedirs(dirname)
if os.name == 'posix':
handler = logging.handlers.WatchedFileHandler(logf)
else:
handler = logging.FileHandler(logf)
except Exception:
sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")
# Check that handler.stream has a fileno() method: when running OpenERP
# behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
# which has no fileno() method. (mod_wsgi.Log is what is being bound to
# sys.stderr when the logging.StreamHandler is being constructed above.)
def is_a_tty(stream):
return hasattr(stream, 'fileno') and os.isatty(stream.fileno())
if os.name == 'posix' and isinstance(handler, logging.StreamHandler) and is_a_tty(handler.stream):
formatter = ColoredFormatter(format)
perf_filter = ColoredPerfFilter()
else:
formatter = DBFormatter(format)
perf_filter = PerfFilter()
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
logging.getLogger('werkzeug').addFilter(perf_filter)
if tools.config['log_db']:
db_levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
postgresqlHandler = PostgreSQLHandler()
postgresqlHandler.setLevel(int(db_levels.get(tools.config['log_db_level'], tools.config['log_db_level'])))
logging.getLogger().addHandler(postgresqlHandler)
# Configure loggers levels
pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])
logconfig = tools.config['log_handler']
logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
for logconfig_item in logging_configurations:
loggername, level = logconfig_item.strip().split(':')
level = getattr(logging, level, logging.INFO)
logger = logging.getLogger(loggername)
logger.setLevel(level)
for logconfig_item in logging_configurations:
_logger.debug('logger level set: "%s"', logconfig_item)
DEFAULT_LOG_CONFIGURATION = [
'odoo.http.rpc.request:INFO',
'odoo.http.rpc.response:INFO',
':INFO',
]
PSEUDOCONFIG_MAPPER = {
'debug_rpc_answer': ['odoo:DEBUG', 'odoo.sql_db:INFO', 'odoo.http.rpc:DEBUG'],
'debug_rpc': ['odoo:DEBUG', 'odoo.sql_db:INFO', 'odoo.http.rpc.request:DEBUG'],
'debug': ['odoo:DEBUG', 'odoo.sql_db:INFO'],
'debug_sql': ['odoo.sql_db:DEBUG'],
'info': [],
'runbot': ['odoo:RUNBOT', 'werkzeug:WARNING'],
'warn': ['odoo:WARNING', 'werkzeug:WARNING'],
'error': ['odoo:ERROR', 'werkzeug:ERROR'],
'critical': ['odoo:CRITICAL', 'werkzeug:CRITICAL'],
}
logging.RUNBOT = 25
logging.addLevelName(logging.RUNBOT, "INFO") # displayed as info in log
logging.captureWarnings(True)
# must be after `logging.captureWarnings` so we override *that* instead of the
# other way around
showwarning = warnings.showwarning
IGNORE = {
'Comparison between bytes and int', # a.foo != False or some shit, we don't care
}
def showwarning_with_traceback(message, category, filename, lineno, file=None, line=None):
if category is BytesWarning and message.args[0] in IGNORE:
return
# find the stack frame matching (filename, lineno)
filtered = []
for frame in traceback.extract_stack():
if 'importlib' not in frame.filename:
filtered.append(frame)
if frame.filename == filename and frame.lineno == lineno:
break
return showwarning(
message, category, filename, lineno,
file=file,
line=''.join(traceback.format_list(filtered))
)
warnings.showwarning = showwarning_with_traceback
def runbot(self, message, *args, **kws):
self.log(logging.RUNBOT, message, *args, **kws)
logging.Logger.runbot = runbot
| 41.28 | 11,352 |
300,877 | py | PYTHON | 15.0 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
Object Relational Mapping module:
* Hierarchical structure
* Constraints consistency and validation
* Object metadata depends on its status
* Optimised processing by complex query (multiple actions at once)
* Default field values
* Permissions optimisation
* Persistent object: DB postgresql
* Data conversion
* Multi-level caching system
* Two different inheritance mechanisms
* Rich set of field types:
- classical (varchar, integer, boolean, ...)
- relational (one2many, many2one, many2many)
- functional
"""
import collections
import contextlib
import datetime
import dateutil
import fnmatch
import functools
import itertools
import io
import logging
import operator
import pytz
import re
import uuid
from collections import defaultdict, OrderedDict
from collections.abc import MutableMapping
from contextlib import closing
from inspect import getmembers, currentframe
from operator import attrgetter, itemgetter
import babel.dates
import dateutil.relativedelta
import psycopg2, psycopg2.extensions
from lxml import etree
from lxml.builder import E
from psycopg2.extensions import AsIs
import odoo
from . import SUPERUSER_ID
from . import api
from . import tools
from .exceptions import AccessError, MissingError, ValidationError, UserError
from .osv.query import Query
from .tools import frozendict, lazy_classproperty, ormcache, \
Collector, LastOrderedSet, OrderedSet, IterableGenerator, \
groupby, discardattr, partition
from .tools.config import config
from .tools.func import frame_codeinfo
from .tools.misc import CountingStream, clean_context, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT, get_lang
from .tools.translate import _
from .tools import date_utils
from .tools import populate
from .tools import unique
from .tools.lru import LRU
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')
_unlink = logging.getLogger(__name__ + '.unlink')
regex_order = re.compile('^(\s*([a-z0-9:_]+|"[a-z0-9:_]+")(\s+(desc|asc))?\s*(,|$))+(?<!,)$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
regex_pg_name = re.compile(r'^[a-z_][a-z0-9_$]*$', re.I)
regex_field_agg = re.compile(r'(\w+)(?::(\w+)(?:\((\w+)\))?)?')
AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
def check_object_name(name):
""" Check if the given name is a valid model name.
The _name attribute in osv and osv_memory object is subject to
some restrictions. This function returns True or False whether
the given name is allowed or not.
TODO: this is an approximation. The goal in this approximation
is to disallow uppercase characters (in some places, we quote
table/column names and in others not, which leads to this kind
of error:
psycopg2.ProgrammingError: relation "xxx" does not exist).
The same restriction should apply to both osv and osv_memory
objects for consistency.
"""
if regex_object_name.match(name) is None:
return False
return True
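# For example: check_object_name('res.partner') is True, while names containing
# uppercase characters or spaces, e.g. 'Res.Partner', are rejected.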
def raise_on_invalid_object_name(name):
if not check_object_name(name):
msg = "The _name attribute %s is not valid." % name
raise ValueError(msg)
def check_pg_name(name):
""" Check whether the given name is a valid PostgreSQL identifier name. """
if not regex_pg_name.match(name):
raise ValidationError("Invalid characters in table name %r" % name)
if len(name) > 63:
raise ValidationError("Table name %r is too long" % name)
# match private methods, to prevent their remote invocation
regex_private = re.compile(r'^(_.*|init)$')
def check_method_name(name):
""" Raise an ``AccessError`` if ``name`` is a private method name. """
if regex_private.match(name):
raise AccessError(_('Private methods (such as %s) cannot be called remotely.') % (name,))
def same_name(f, g):
""" Test whether functions ``f`` and ``g`` are identical or have the same name """
return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)
def fix_import_export_id_paths(fieldname):
"""
Fixes the id fields in import and exports, and splits field paths
on '/'.
:param str fieldname: name of the field to import/export
:return: split field name
:rtype: list of str
"""
fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
return fixed_external_id.split('/')
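# For example: 'partner_id/name' -> ['partner_id', 'name'],
# 'country_id:id' -> ['country_id', 'id'] and 'country_id.id' -> ['country_id', '.id'].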
def trigger_tree_merge(node1, node2):
""" Merge two trigger trees. """
for key, val in node2.items():
if key is None:
node1.setdefault(None, OrderedSet())
node1[None].update(val)
else:
node1.setdefault(key, {})
trigger_tree_merge(node1[key], node2[key])
class MetaModel(api.Meta):
""" The metaclass of all model classes.
Its main purpose is to register the models per module.
"""
module_to_models = defaultdict(list)
def __new__(meta, name, bases, attrs):
# this prevents assignment of non-fields on recordsets
attrs.setdefault('__slots__', ())
# this collects the fields defined on the class (via Field.__set_name__())
attrs.setdefault('_field_definitions', [])
if attrs.get('_register', True):
# determine '_module'
if '_module' not in attrs:
module = attrs['__module__']
assert module.startswith('odoo.addons.'), \
f"Invalid import of {module}.{name}, it should start with 'odoo.addons'."
attrs['_module'] = module.split('.')[2]
# determine model '_name' and normalize '_inherits'
inherit = attrs.get('_inherit', ())
if isinstance(inherit, str):
inherit = attrs['_inherit'] = [inherit]
if '_name' not in attrs:
attrs['_name'] = inherit[0] if len(inherit) == 1 else name
return super().__new__(meta, name, bases, attrs)
def __init__(self, name, bases, attrs):
super().__init__(name, bases, attrs)
if not attrs.get('_register', True):
return
# Remember which models to instantiate for this module.
if self._module:
self.module_to_models[self._module].append(self)
if not self._abstract and self._name not in self._inherit:
# this class defines a model: add magic fields
def add(name, field):
setattr(self, name, field)
field.__set_name__(self, name)
def add_default(name, field):
if name not in attrs:
setattr(self, name, field)
field.__set_name__(self, name)
add('id', fields.Id(automatic=True))
add(self.CONCURRENCY_CHECK_FIELD, fields.Datetime(
string='Last Modified on', automatic=True,
compute='_compute_concurrency_field', compute_sudo=False))
add_default('display_name', fields.Char(
string='Display Name', automatic=True, compute='_compute_display_name'))
if attrs.get('_log_access', self._auto):
add_default('create_uid', fields.Many2one(
'res.users', string='Created by', automatic=True, readonly=True))
add_default('create_date', fields.Datetime(
string='Created on', automatic=True, readonly=True))
add_default('write_uid', fields.Many2one(
'res.users', string='Last Updated by', automatic=True, readonly=True))
add_default('write_date', fields.Datetime(
string='Last Updated on', automatic=True, readonly=True))
class NewId(object):
""" Pseudo-ids for new records, encapsulating an optional origin id (actual
record id) and an optional reference (any value).
"""
__slots__ = ['origin', 'ref']
def __init__(self, origin=None, ref=None):
self.origin = origin
self.ref = ref
def __bool__(self):
return False
def __eq__(self, other):
return isinstance(other, NewId) and (
(self.origin and other.origin and self.origin == other.origin)
or (self.ref and other.ref and self.ref == other.ref)
)
def __hash__(self):
return hash(self.origin or self.ref or id(self))
def __repr__(self):
return (
"<NewId origin=%r>" % self.origin if self.origin else
"<NewId ref=%r>" % self.ref if self.ref else
"<NewId 0x%x>" % id(self)
)
def __str__(self):
if self.origin or self.ref:
id_part = repr(self.origin or self.ref)
else:
id_part = hex(id(self))
return "NewId_%s" % id_part
def origin_ids(ids):
""" Return an iterator over the origin ids corresponding to ``ids``.
Actual ids are returned as is, and ids without origin are not returned.
"""
return ((id_ or id_.origin) for id_ in ids if (id_ or getattr(id_, "origin", None)))
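# For example: list(origin_ids([42, NewId(origin=7), NewId()])) yields [42, 7];
# the NewId without an origin is dropped.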
def expand_ids(id0, ids):
""" Return an iterator of unique ids from the concatenation of ``[id0]`` and
``ids``, and of the same kind (all real or all new).
"""
yield id0
seen = {id0}
kind = bool(id0)
for id_ in ids:
if id_ not in seen and bool(id_) == kind:
yield id_
seen.add(id_)
IdType = (int, str, NewId)
# maximum number of prefetched records
PREFETCH_MAX = 1000
# special columns automatically created by the ORM
LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
# valid SQL aggregation functions
VALID_AGGREGATE_FUNCTIONS = {
'array_agg', 'count', 'count_distinct',
'bool_and', 'bool_or', 'max', 'min', 'avg', 'sum',
}
# THE DEFINITION AND REGISTRY CLASSES
#
# The framework deals with two kinds of classes for models: the "definition"
# classes and the "registry" classes.
#
# The "definition" classes are the ones defined in modules source code: they
# define models and extend them. Those classes are essentially "static", for
# whatever that means in Python. The only exception is custom models: their
# definition class is created dynamically.
#
# The "registry" classes are the ones you find in the registry. They are the
# actual classes of the recordsets of their model. The "registry" class of a
# model is created dynamically when the registry is built. It inherits (in the
# Python sense) from all the definition classes of the model, and possibly other
# registry classes (when the model inherits from another model). It also
# carries model metadata inferred from its parent classes.
#
#
# THE REGISTRY CLASS OF A MODEL
#
# In the simplest case, a model's registry class inherits from all the classes
# that define the model in a flat hierarchy. Consider the model definition
# below. The registry class of model 'a' inherits from the definition classes
# A1, A2, A3, in reverse order, to match the expected overriding order. The
# registry class carries inferred metadata that is shared between all the
# model's instances for a given registry.
#
# class A1(Model): Model
# _name = 'a' / | \
# A3 A2 A1 <- definition classes
# class A2(Model): \ | /
# _inherit = 'a' a <- registry class: registry['a']
# |
# class A3(Model): records <- model instances, like env['a']
# _inherit = 'a'
#
# Note that when the model inherits from another model, we actually make the
# registry classes inherit from each other, so that extensions to an inherited
# model are visible in the registry class of the child model, like in the
# following example.
#
# class A1(Model):
# _name = 'a' Model
# / / \ \
# class B1(Model): / / \ \
# _name = 'b' / A2 A1 \
# B2 \ / B1
# class B2(Model): \ \ / /
# _name = 'b' \ a /
# _inherit = ['a', 'b'] \ | /
# \ | /
# class A2(Model): b
# _inherit = 'a'
#
#
# THE FIELDS OF A MODEL
#
# The fields of a model are given by the model's definition classes, inherited
# models ('_inherit' and '_inherits') and other parties, like custom fields.
# Note that a field can be partially overridden when it appears on several
# definition classes of its model. In that case, the field's final definition
# depends on the presence or absence of each definition class, which itself
# depends on the modules loaded in the registry.
#
# By design, the registry class has access to all the fields on the model's
# definition classes. When possible, the field is used directly from the
# model's registry class. There are a number of cases where the field cannot be
# used directly:
# - the field is related (and bits may not be shared);
# - the field is overridden on definition classes;
# - the field is defined for another model (and accessible by mixin).
#
# The last case prevents sharing the field, because the field object is specific
# to a model, and is used as a key in several key dictionaries, like the record
# cache and pending computations.
#
# Setting up a field on its definition class helps saving memory and time.
# Indeed, when sharing is possible, the field's setup is almost entirely done
# where the field was defined. It is thus done when the definition class was
# created, and it may be reused across registries.
#
# In the example below, the field 'foo' appears once on its model's definition
# classes. Assuming that it is not related, that field can be set up directly
# on its definition class. If the model appears in several registries, the
# field 'foo' is effectively shared across registries.
#
# class A1(Model): Model
# _name = 'a' / \
# foo = ... / \
# bar = ... A2 A1
# bar foo, bar
# class A2(Model): \ /
# _inherit = 'a' \ /
# bar = ... a
# bar
#
# On the other hand, the field 'bar' is overridden in its model's definition
# classes. In that case, the framework recreates the field on the model's
# registry class. The field's setup will be based on its definitions, and will
# not be shared across registries.
#
# The so-called magic fields ('id', 'display_name', ...) used to be added on
# registry classes. But doing so prevents them from being shared. So instead,
# we add them on definition classes that define a model without extending it.
# This increases the number of fields that are shared across registries.
def is_definition_class(cls):
""" Return whether ``cls`` is a model definition class. """
return isinstance(cls, MetaModel) and getattr(cls, 'pool', None) is None
def is_registry_class(cls):
""" Return whether ``cls`` is a model registry class. """
return getattr(cls, 'pool', None) is not None
class BaseModel(metaclass=MetaModel):
"""Base class for Odoo models.
Odoo models are created by inheriting one of the following:
* :class:`Model` for regular database-persisted models
* :class:`TransientModel` for temporary data, stored in the database but
automatically vacuumed every so often
* :class:`AbstractModel` for abstract super classes meant to be shared by
multiple inheriting models
The system automatically instantiates every model once per database. Those
instances represent the available models on each database, and depend on
which modules are installed on that database. The actual class of each
instance is built from the Python classes that create and inherit from the
corresponding model.
Every model instance is a "recordset", i.e., an ordered collection of
records of the model. Recordsets are returned by methods like
:meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
explicit representation: a record is represented as a recordset of one
record.
To create a class that should not be instantiated,
the :attr:`~odoo.models.BaseModel._register` attribute may be set to False.
"""
__slots__ = ['env', '_ids', '_prefetch_ids']
_auto = False
"""Whether a database table should be created.
If set to ``False``, override :meth:`~odoo.models.BaseModel.init`
to create the database table.
Automatically defaults to `True` for :class:`Model` and
:class:`TransientModel`, `False` for :class:`AbstractModel`.
.. tip:: To create a model without any table, inherit
from :class:`~odoo.models.AbstractModel`.
"""
_register = False #: registry visibility
_abstract = True
""" Whether the model is *abstract*.
.. seealso:: :class:`AbstractModel`
"""
_transient = False
""" Whether the model is *transient*.
.. seealso:: :class:`TransientModel`
"""
_name = None #: the model name (in dot-notation, module namespace)
_description = None #: the model's informal name
_module = None #: the model's module (in the Odoo sense)
_custom = False #: should be True for custom models only
_inherit = ()
"""Python-inherited models:
:type: str or list(str)
.. note::
* If :attr:`._name` is set, name(s) of parent models to inherit from
* If :attr:`._name` is unset, name of a single model to extend in-place
"""
_inherits = frozendict()
"""dictionary {'parent_model': 'm2o_field'} mapping the _name of the parent business
objects to the names of the corresponding foreign key fields to use::
_inherits = {
'a.model': 'a_field_id',
'b.model': 'b_field_id'
}
implements composition-based inheritance: the new model exposes all
the fields of the inherited models but stores none of them:
the values themselves remain stored on the linked record.
.. warning::
if multiple fields with the same name are defined in the
:attr:`~odoo.models.Model._inherits`-ed models, the inherited field will
correspond to the last one (in the inherits list order).
"""
_table = None #: SQL table name used by model if :attr:`_auto`
_table_query = None #: SQL expression of the table's content (optional)
_sequence = None #: SQL sequence to use for ID field
_sql_constraints = [] #: SQL constraints [(name, sql_def, message)]
_rec_name = None #: field to use for labeling records, default: ``name``
_order = 'id' #: default order field for searching results
_parent_name = 'parent_id' #: the many2one field used as parent field
_parent_store = False
"""set to True to compute parent_path field.
Alongside a :attr:`~.parent_path` field, sets up an indexed storage
of the tree structure of records, to enable faster hierarchical queries
on the records of the current model using the ``child_of`` and
``parent_of`` domain operators.
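    A minimal sketch, assuming a hypothetical ``my.category`` model::
        from odoo import fields, models
        class Category(models.Model):
            _name = 'my.category'
            _description = 'Hypothetical hierarchical category'
            _parent_store = True
            name = fields.Char(required=True)
            parent_id = fields.Many2one('my.category', index=True, ondelete='cascade')
            parent_path = fields.Char(index=True)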
"""
_active_name = None #: field to use for active records
_date_name = 'date' #: field to use for default calendar view
_fold_name = 'fold' #: field to determine folded groups in kanban views
_needaction = False # whether the model supports "need actions" (Old API)
_translate = True # False disables translations export for this model (Old API)
_check_company_auto = False
"""On write and create, call ``_check_company`` to ensure companies
consistency on the relational fields having ``check_company=True``
as attribute.
"""
_depends = frozendict()
"""dependencies of models backed up by SQL views
``{model_name: field_names}``, where ``field_names`` is an iterable.
This is only used to determine the changes to flush to database before
executing ``search()`` or ``read_group()``. It won't be used for cache
invalidation or recomputing fields.
"""
# default values for _transient_vacuum()
_transient_max_count = lazy_classproperty(lambda _: config.get('osv_memory_count_limit'))
_transient_max_hours = lazy_classproperty(lambda _: config.get('transient_age_limit'))
CONCURRENCY_CHECK_FIELD = '__last_update'
@api.model
def view_init(self, fields_list):
""" Override this method to do specific things when a form view is
opened. This method is invoked by :meth:`~default_get`.
"""
pass
def _valid_field_parameter(self, field, name):
""" Return whether the given parameter name is valid for the field. """
return name == 'related_sudo'
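    # Definition classes may whitelist extra field keyword arguments by
    # overriding this hook; a minimal sketch (hypothetical ``my_option`` parameter):
    #
    #     def _valid_field_parameter(self, field, name):
    #         return name == 'my_option' or super()._valid_field_parameter(field, name)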
@api.model
def _add_field(self, name, field):
""" Add the given ``field`` under the given ``name`` in the class """
cls = type(self)
# add field as an attribute and in cls._fields (for reflection)
if not isinstance(getattr(cls, name, field), Field):
_logger.warning("In model %r, field %r overriding existing value", cls._name, name)
setattr(cls, name, field)
field._toplevel = True
field.__set_name__(cls, name)
cls._fields[name] = field
@api.model
def _pop_field(self, name):
""" Remove the field with the given ``name`` from the model.
This method should only be used for manual fields.
"""
cls = type(self)
field = cls._fields.pop(name, None)
discardattr(cls, name)
if cls._rec_name == name:
# fixup _rec_name and display_name's dependencies
cls._rec_name = None
if cls.display_name in cls.pool.field_depends:
cls.pool.field_depends[cls.display_name] = tuple(
dep for dep in cls.pool.field_depends[cls.display_name] if dep != name
)
return field
@api.depends(lambda model: ('create_date', 'write_date') if model._log_access else ())
def _compute_concurrency_field(self):
fname = self.CONCURRENCY_CHECK_FIELD
if self._log_access:
for record in self:
record[fname] = record.write_date or record.create_date or Datetime.now()
else:
self[fname] = odoo.fields.Datetime.now()
#
# Goal: try to apply inheritance at the instantiation level and
# put objects in the pool var
#
@classmethod
def _build_model(cls, pool, cr):
""" Instantiate a given model in the registry.
This method creates or extends a "registry" class for the given model.
This "registry" class carries inferred model metadata, and inherits (in
the Python sense) from all classes that define the model, and possibly
other registry classes.
"""
if getattr(cls, '_constraints', None):
_logger.warning("Model attribute '_constraints' is no longer supported, "
"please use @api.constrains on methods instead.")
# Keep links to non-inherited constraints in cls; this is useful for
# instance when exporting translations
cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])
# all models except 'base' implicitly inherit from 'base'
name = cls._name
parents = list(cls._inherit)
if name != 'base':
parents.append('base')
# create or retrieve the model's class
if name in parents:
if name not in pool:
raise TypeError("Model %r does not exist in registry." % name)
ModelClass = pool[name]
ModelClass._build_model_check_base(cls)
check_parent = ModelClass._build_model_check_parent
else:
ModelClass = type(name, (cls,), {
'_name': name,
'_register': False,
'_original_module': cls._module,
'_inherit_module': {}, # map parent to introducing module
'_inherit_children': OrderedSet(), # names of children models
'_inherits_children': set(), # names of children models
'_fields': {}, # populated in _setup_base()
})
check_parent = cls._build_model_check_parent
# determine all the classes the model should inherit from
bases = LastOrderedSet([cls])
for parent in parents:
if parent not in pool:
raise TypeError("Model %r inherits from non-existing model %r." % (name, parent))
parent_class = pool[parent]
if parent == name:
for base in parent_class.__base_classes:
bases.add(base)
else:
check_parent(cls, parent_class)
bases.add(parent_class)
ModelClass._inherit_module[parent] = cls._module
parent_class._inherit_children.add(name)
# ModelClass.__bases__ must be assigned those classes; however, this
# operation is quite slow, so we do it once in method _prepare_setup()
ModelClass.__base_classes = tuple(bases)
# determine the attributes of the model's class
ModelClass._build_model_attributes(pool)
check_pg_name(ModelClass._table)
# Transience
if ModelClass._transient:
assert ModelClass._log_access, \
"TransientModels must have log_access turned on, " \
"in order to implement their vacuum policy"
# link the class to the registry, and update the registry
ModelClass.pool = pool
pool[name] = ModelClass
# backward compatibility: instantiate the model, and initialize it
model = object.__new__(ModelClass)
model.__init__(pool, cr)
return ModelClass
@classmethod
def _build_model_check_base(model_class, cls):
""" Check whether ``model_class`` can be extended with ``cls``. """
if model_class._abstract and not cls._abstract:
msg = ("%s transforms the abstract model %r into a non-abstract model. "
"That class should either inherit from AbstractModel, or set a different '_name'.")
raise TypeError(msg % (cls, model_class._name))
if model_class._transient != cls._transient:
if model_class._transient:
msg = ("%s transforms the transient model %r into a non-transient model. "
"That class should either inherit from TransientModel, or set a different '_name'.")
else:
msg = ("%s transforms the model %r into a transient model. "
"That class should either inherit from Model, or set a different '_name'.")
raise TypeError(msg % (cls, model_class._name))
@classmethod
def _build_model_check_parent(model_class, cls, parent_class):
""" Check whether ``model_class`` can inherit from ``parent_class``. """
if model_class._abstract and not parent_class._abstract:
msg = ("In %s, the abstract model %r cannot inherit from the non-abstract model %r.")
raise TypeError(msg % (cls, model_class._name, parent_class._name))
@classmethod
def _build_model_attributes(cls, pool):
""" Initialize base model attributes. """
cls._description = cls._name
cls._table = cls._name.replace('.', '_')
cls._sequence = None
cls._log_access = cls._auto
inherits = {}
depends = {}
cls._sql_constraints = {}
for base in reversed(cls.__base_classes):
if is_definition_class(base):
# the following attributes are not taken from registry classes
if cls._name not in base._inherit and not base._description:
_logger.warning("The model %s has no _description", cls._name)
cls._description = base._description or cls._description
cls._table = base._table or cls._table
cls._sequence = base._sequence or cls._sequence
cls._log_access = getattr(base, '_log_access', cls._log_access)
inherits.update(base._inherits)
for mname, fnames in base._depends.items():
depends.setdefault(mname, []).extend(fnames)
for cons in base._sql_constraints:
cls._sql_constraints[cons[0]] = cons
cls._sequence = cls._sequence or (cls._table + '_id_seq')
cls._sql_constraints = list(cls._sql_constraints.values())
# avoid assigning an empty dict to save memory
if inherits:
cls._inherits = inherits
if depends:
cls._depends = depends
# update _inherits_children of parent models
for parent_name in cls._inherits:
pool[parent_name]._inherits_children.add(cls._name)
# recompute attributes of _inherit_children models
for child_name in cls._inherit_children:
child_class = pool[child_name]
child_class._build_model_attributes(pool)
@classmethod
def _init_constraints_onchanges(cls):
# store list of sql constraint qualified names
for (key, _, _) in cls._sql_constraints:
cls.pool._sql_constraints.add(cls._table + '_' + key)
# reset properties memoized on cls
cls._constraint_methods = BaseModel._constraint_methods
cls._ondelete_methods = BaseModel._ondelete_methods
cls._onchange_methods = BaseModel._onchange_methods
@property
def _constraint_methods(self):
""" Return a list of methods implementing Python constraints. """
def is_constraint(func):
return callable(func) and hasattr(func, '_constrains')
def wrap(func, names):
# wrap func into a proxy function with explicit '_constrains'
@api.constrains(*names)
def wrapper(self):
return func(self)
return wrapper
cls = type(self)
methods = []
for attr, func in getmembers(cls, is_constraint):
if callable(func._constrains):
func = wrap(func, func._constrains(self))
for name in func._constrains:
field = cls._fields.get(name)
if not field:
_logger.warning("method %s.%s: @constrains parameter %r is not a field name", cls._name, attr, name)
elif not (field.store or field.inverse or field.inherited):
_logger.warning("method %s.%s: @constrains parameter %r is not writeable", cls._name, attr, name)
methods.append(func)
# optimization: memoize result on cls, it will not be recomputed
cls._constraint_methods = methods
return methods
@property
def _ondelete_methods(self):
""" Return a list of methods implementing checks before unlinking. """
def is_ondelete(func):
return callable(func) and hasattr(func, '_ondelete')
cls = type(self)
methods = [func for _, func in getmembers(cls, is_ondelete)]
# optimization: memoize results on cls, it will not be recomputed
cls._ondelete_methods = methods
return methods
@property
def _onchange_methods(self):
""" Return a dictionary mapping field names to onchange methods. """
def is_onchange(func):
return callable(func) and hasattr(func, '_onchange')
# collect onchange methods on the model's class
cls = type(self)
methods = defaultdict(list)
for attr, func in getmembers(cls, is_onchange):
missing = []
for name in func._onchange:
if name not in cls._fields:
missing.append(name)
methods[name].append(func)
if missing:
_logger.warning(
"@api.onchange%r parameters must be field names -> not valid: %s",
func._onchange, missing
)
# add onchange methods to implement "change_default" on fields
def onchange_default(field, self):
value = field.convert_to_write(self[field.name], self)
condition = "%s=%s" % (field.name, value)
defaults = self.env['ir.default'].get_model_defaults(self._name, condition)
self.update(defaults)
for name, field in cls._fields.items():
if field.change_default:
methods[name].append(functools.partial(onchange_default, field))
# optimization: memoize result on cls, it will not be recomputed
cls._onchange_methods = methods
return methods
def __new__(cls):
# In the past, this method was registering the model class in the server.
# This job is now done entirely by the metaclass MetaModel.
return None
def __init__(self, pool, cr):
""" Deprecated method to initialize the model. """
pass
def _is_an_ordinary_table(self):
return self.pool.is_an_ordinary_table(self)
def __ensure_xml_id(self, skip=False):
""" Create missing external ids for records in ``self``, and return an
iterator of pairs ``(record, xmlid)`` for the records in ``self``.
:rtype: Iterable[Model, str | None]
"""
if skip:
return ((record, None) for record in self)
if not self:
return iter([])
if not self._is_an_ordinary_table():
raise Exception(
"You can not export the column ID of model %s, because the "
"table %s is not an ordinary table."
% (self._name, self._table))
modname = '__export__'
cr = self.env.cr
cr.execute("""
SELECT res_id, module, name
FROM ir_model_data
WHERE model = %s AND res_id in %s
""", (self._name, tuple(self.ids)))
xids = {
res_id: (module, name)
for res_id, module, name in cr.fetchall()
}
def to_xid(record_id):
(module, name) = xids[record_id]
return ('%s.%s' % (module, name)) if module else name
# create missing xml ids
missing = self.filtered(lambda r: r.id not in xids)
if not missing:
return (
(record, to_xid(record.id))
for record in self
)
xids.update(
(r.id, (modname, '%s_%s_%s' % (
r._table,
r.id,
uuid.uuid4().hex[:8],
)))
for r in missing
)
fields = ['module', 'model', 'name', 'res_id']
# disable eventual async callback / support for the extent of
# the COPY FROM, as these are apparently incompatible
callback = psycopg2.extensions.get_wait_callback()
psycopg2.extensions.set_wait_callback(None)
try:
cr.copy_from(io.StringIO(
u'\n'.join(
u"%s\t%s\t%s\t%d" % (
modname,
record._name,
xids[record.id][1],
record.id,
)
for record in missing
)),
table='ir_model_data',
columns=fields,
)
finally:
psycopg2.extensions.set_wait_callback(callback)
self.env['ir.model.data'].invalidate_cache(fnames=fields)
return (
(record, to_xid(record.id))
for record in self
)
def _export_rows(self, fields, *, _is_toplevel_call=True):
""" Export fields of the records in ``self``.
:param fields: list of lists of fields to traverse
:param bool _is_toplevel_call:
used when recursing, avoid using when calling from outside
:return: list of lists of corresponding values
"""
import_compatible = self.env.context.get('import_compat', True)
lines = []
def splittor(rs):
""" Splits the self recordset in batches of 1000 (to avoid
entire-recordset-prefetch-effects) & removes the previous batch
from the cache after it's been iterated in full
"""
for idx in range(0, len(rs), 1000):
sub = rs[idx:idx+1000]
for rec in sub:
yield rec
rs.invalidate_cache(ids=sub.ids)
if not _is_toplevel_call:
splittor = lambda rs: rs
# memory stable but ends up prefetching 275 fields (???)
for record in splittor(self):
# main line of record, initially empty
current = [''] * len(fields)
lines.append(current)
# list of primary fields followed by secondary field(s)
primary_done = []
# process column by column
for i, path in enumerate(fields):
if not path:
continue
name = path[0]
if name in primary_done:
continue
if name == '.id':
current[i] = str(record.id)
elif name == 'id':
current[i] = (record._name, record.id)
else:
field = record._fields[name]
value = record[name]
# this part could be simpler, but it has to be done this way
# in order to reproduce the former behavior
if not isinstance(value, BaseModel):
current[i] = field.convert_to_export(value, record)
else:
primary_done.append(name)
# recursively export the fields that follow name; use
# 'display_name' where no subfield is exported
fields2 = [(p[1:] or ['display_name'] if p and p[0] == name else [])
for p in fields]
# in import_compat mode, m2m should always be exported as
# a comma-separated list of xids or names in a single cell
if import_compatible and field.type == 'many2many':
index = None
# find out which subfield the user wants & its
# location as we might not get it as the first
# column we encounter
for name in ['id', 'name', 'display_name']:
with contextlib.suppress(ValueError):
index = fields2.index([name])
break
if index is None:
# not found anything, assume we just want the
# name_get in the first column
name = None
index = i
if name == 'id':
xml_ids = [xid for _, xid in value.__ensure_xml_id()]
current[index] = ','.join(xml_ids)
else:
current[index] = field.convert_to_export(value, record)
continue
lines2 = value._export_rows(fields2, _is_toplevel_call=False)
if lines2:
# merge first line with record's main line
for j, val in enumerate(lines2[0]):
if val or isinstance(val, (int, float)):
current[j] = val
# append the other lines at the end
lines += lines2[1:]
else:
current[i] = ''
# if any xid should be exported, only do so at toplevel
if _is_toplevel_call and any(f[-1] == 'id' for f in fields):
bymodels = collections.defaultdict(set)
xidmap = collections.defaultdict(list)
# collect all the tuples in "lines" (along with their coordinates)
for i, line in enumerate(lines):
for j, cell in enumerate(line):
if type(cell) is tuple:
bymodels[cell[0]].add(cell[1])
xidmap[cell].append((i, j))
# for each model, xid-export everything and inject in matrix
for model, ids in bymodels.items():
for record, xid in self.env[model].browse(ids).__ensure_xml_id():
for i, j in xidmap.pop((record._name, record.id)):
lines[i][j] = xid
        assert not xidmap, "failed to export xids for %s" % ', '.join('{}:{}'.format(*it) for it in xidmap.items())
return lines
# backward compatibility
__export_rows = _export_rows
def export_data(self, fields_to_export):
""" Export fields for selected objects
:param fields_to_export: list of fields
        :rtype: dictionary with a *datas* matrix
        This method is used when exporting data via the client menu.
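        For example, a sketch exporting partner names and their children's names::
            self.env['res.partner'].search([], limit=5).export_data(
                ['name', 'child_ids/name'])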
"""
if not (self.env.is_admin() or self.env.user.has_group('base.group_allow_export')):
raise UserError(_("You don't have the rights to export data. Please contact an Administrator."))
fields_to_export = [fix_import_export_id_paths(f) for f in fields_to_export]
return {'datas': self._export_rows(fields_to_export)}
@api.model
def load(self, fields, data):
"""
Attempts to load the data matrix, and returns a list of ids (or
``False`` if there was an error and no id could be generated) and a
list of messages.
The ids are those of the records created and saved (in database), in
the same order they were extracted from the file. They can be passed
directly to :meth:`~read`
:param fields: list of fields to import, at the same index as the corresponding data
:type fields: list(str)
:param data: row-major matrix of data to import
:type data: list(list(str))
        :returns: {ids: list(int)|False, messages: [Message], nextrow: int}
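        A minimal sketch, assuming hypothetical external ids in the
        ``__import__`` namespace::
            self.env['res.partner'].load(
                ['id', 'name'],
                [['__import__.partner_a', 'Partner A'],
                 ['__import__.partner_b', 'Partner B']],
            )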
"""
self.flush()
# determine values of mode, current_module and noupdate
mode = self._context.get('mode', 'init')
current_module = self._context.get('module', '__import__')
noupdate = self._context.get('noupdate', False)
# add current module in context for the conversion of xml ids
self = self.with_context(_import_current_module=current_module)
cr = self._cr
cr.execute('SAVEPOINT model_load')
fields = [fix_import_export_id_paths(f) for f in fields]
fg = self.fields_get()
ids = []
messages = []
ModelData = self.env['ir.model.data']
# list of (xid, vals, info) for records to be created in batch
batch = []
batch_xml_ids = set()
# models in which we may have created / modified data, therefore might
# require flushing in order to name_search: the root model and any
# o2m
creatable_models = {self._name}
for field_path in fields:
if field_path[0] in (None, 'id', '.id'):
continue
model_fields = self._fields
if isinstance(model_fields[field_path[0]], odoo.fields.Many2one):
# this only applies for toplevel m2o (?) fields
if field_path[0] in (self.env.context.get('name_create_enabled_fieds') or {}):
creatable_models.add(model_fields[field_path[0]].comodel_name)
for field_name in field_path:
if field_name in (None, 'id', '.id'):
break
if isinstance(model_fields[field_name], odoo.fields.One2many):
comodel = model_fields[field_name].comodel_name
creatable_models.add(comodel)
model_fields = self.env[comodel]._fields
def flush(*, xml_id=None, model=None):
if not batch:
return
assert not (xml_id and model), \
"flush can specify *either* an external id or a model, not both"
if xml_id and xml_id not in batch_xml_ids:
if xml_id not in self.env:
return
if model and model not in creatable_models:
return
data_list = [
dict(xml_id=xid, values=vals, info=info, noupdate=noupdate)
for xid, vals, info in batch
]
batch.clear()
batch_xml_ids.clear()
# try to create in batch
try:
with cr.savepoint():
recs = self._load_records(data_list, mode == 'update')
ids.extend(recs.ids)
return
except psycopg2.InternalError as e:
# broken transaction, exit and hope the source error was already logged
if not any(message['type'] == 'error' for message in messages):
info = data_list[0]['info']
messages.append(dict(info, type='error', message=_(u"Unknown database error: '%s'", e)))
return
except Exception:
pass
errors = 0
# try again, this time record by record
for i, rec_data in enumerate(data_list, 1):
try:
with cr.savepoint():
rec = self._load_records([rec_data], mode == 'update')
ids.append(rec.id)
except psycopg2.Warning as e:
info = rec_data['info']
messages.append(dict(info, type='warning', message=str(e)))
except psycopg2.Error as e:
info = rec_data['info']
messages.append(dict(info, type='error', **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
# Failed to write, log to messages, rollback savepoint (to
# avoid broken transaction) and keep going
errors += 1
except Exception as e:
_logger.debug("Error while loading record", exc_info=True)
info = rec_data['info']
message = (_(u'Unknown error during import:') + u' %s: %s' % (type(e), e))
moreinfo = _('Resolve other errors first')
messages.append(dict(info, type='error', message=message, moreinfo=moreinfo))
# Failed for some reason, perhaps due to invalid data supplied,
# rollback savepoint and keep going
errors += 1
if errors >= 10 and (errors >= i / 10):
messages.append({
'type': 'warning',
'message': _(u"Found more than 10 errors and more than one error per 10 records, interrupted to avoid showing too many errors.")
})
break
# make 'flush' available to the methods below, in the case where XMLID
# resolution fails, for instance
flush_self = self.with_context(import_flush=flush, import_cache=LRU(1024))
# TODO: break load's API instead of smuggling via context?
limit = self._context.get('_import_limit')
if limit is None:
limit = float('inf')
extracted = flush_self._extract_records(fields, data, log=messages.append, limit=limit)
converted = flush_self._convert_records(extracted, log=messages.append)
info = {'rows': {'to': -1}}
for id, xid, record, info in converted:
if self.env.context.get('import_file') and self.env.context.get('import_skip_records'):
if any([record.get(field) is None for field in self.env.context['import_skip_records']]):
continue
if xid:
xid = xid if '.' in xid else "%s.%s" % (current_module, xid)
batch_xml_ids.add(xid)
elif id:
record['id'] = id
batch.append((xid, record, info))
flush()
if any(message['type'] == 'error' for message in messages):
cr.execute('ROLLBACK TO SAVEPOINT model_load')
ids = False
# cancel all changes done to the registry/ormcache
self.pool.reset_changes()
nextrow = info['rows']['to'] + 1
if nextrow < limit:
nextrow = 0
return {
'ids': ids,
'messages': messages,
'nextrow': nextrow,
}
def _add_fake_fields(self, fields):
from odoo.fields import Char, Integer
fields[None] = Char('rec_name')
fields['id'] = Char('External ID')
fields['.id'] = Integer('Database ID')
return fields
def _extract_records(self, fields_, data, log=lambda a: None, limit=float('inf')):
""" Generates record dicts from the data sequence.
The result is a generator of dicts mapping field names to raw
(unconverted, unvalidated) values.
For relational fields, if sub-fields were provided the value will be
a list of sub-records
The following sub-fields may be set on the record (by key):
* None is the name_get for the record (to use with name_create/name_search)
* "id" is the External ID for the record
* ".id" is the Database ID for the record
"""
fields = dict(self._fields)
# Fake fields to avoid special cases in extractor
fields = self._add_fake_fields(fields)
# m2o fields can't be on multiple lines so exclude them from the
# is_relational field rows filter, but special-case it later on to
# be handled with relational fields (as it can have subfields)
is_relational = lambda field: fields[field].relational
get_o2m_values = itemgetter_tuple([
index
for index, fnames in enumerate(fields_)
if fields[fnames[0]].type == 'one2many'
])
get_nono2m_values = itemgetter_tuple([
index
for index, fnames in enumerate(fields_)
if fields[fnames[0]].type != 'one2many'
])
# Checks if the provided row has any non-empty one2many fields
def only_o2m_values(row):
return any(get_o2m_values(row)) and not any(get_nono2m_values(row))
index = 0
while index < len(data) and index < limit:
row = data[index]
# copy non-relational fields to record dict
record = {fnames[0]: value
for fnames, value in zip(fields_, row)
if not is_relational(fnames[0])}
# Get all following rows which have relational values attached to
# the current record (no non-relational values)
record_span = itertools.takewhile(
only_o2m_values, itertools.islice(data, index + 1, None))
# stitch record row back on for relational fields
record_span = list(itertools.chain([row], record_span))
for relfield in set(fnames[0] for fnames in fields_ if is_relational(fnames[0])):
comodel = self.env[fields[relfield].comodel_name]
# get only cells for this sub-field, should be strictly
# non-empty, field path [None] is for name_get field
indices, subfields = zip(*((index, fnames[1:] or [None])
for index, fnames in enumerate(fields_)
if fnames[0] == relfield))
# return all rows which have at least one value for the
# subfields of relfield
relfield_data = [it for it in map(itemgetter_tuple(indices), record_span) if any(it)]
record[relfield] = [
subrecord
for subrecord, _subinfo in comodel._extract_records(subfields, relfield_data, log=log)
]
yield record, {'rows': {
'from': index,
'to': index + len(record_span) - 1,
}}
index += len(record_span)
@api.model
def _convert_records(self, records, log=lambda a: None):
""" Converts records from the source iterable (recursive dicts of
strings) into forms which can be written to the database (via
self.create or (ir.model.data)._update)
        :returns: a generator of 4-tuples ``(dbid, xid, converted_record, info)``
        :rtype: Iterable[(int|None, str|None, dict, dict)]
"""
field_names = {name: field.string for name, field in self._fields.items()}
if self.env.lang:
field_names.update(self.env['ir.translation'].get_field_string(self._name))
convert = self.env['ir.fields.converter'].for_model(self)
def _log(base, record, field, exception):
type = 'warning' if isinstance(exception, Warning) else 'error'
# logs the logical (not human-readable) field name for automated
# processing of response, but injects human readable in message
field_name = field_names[field]
exc_vals = dict(base, record=record, field=field_name)
record = dict(base, type=type, record=record, field=field,
message=str(exception.args[0]) % exc_vals)
if len(exception.args) > 1:
info = {}
if exception.args[1] and isinstance(exception.args[1], dict):
info = exception.args[1]
# ensure field_name is added to the exception. Used in import to
# concatenate multiple errors in the same block
info['field_name'] = field_name
record.update(info)
log(record)
stream = CountingStream(records)
for record, extras in stream:
# xid
xid = record.get('id', False)
# dbid
dbid = False
if '.id' in record:
try:
dbid = int(record['.id'])
except ValueError:
# in case of overridden id column
dbid = record['.id']
if not self.search([('id', '=', dbid)]):
log(dict(extras,
type='error',
record=stream.index,
field='.id',
message=_(u"Unknown database identifier '%s'", dbid)))
dbid = False
converted = convert(record, functools.partial(_log, extras, stream.index))
yield dbid, xid, converted, dict(extras, record=stream.index)
def _validate_fields(self, field_names, excluded_names=()):
""" Invoke the constraint methods for which at least one field name is
in ``field_names`` and none is in ``excluded_names``.
"""
field_names = set(field_names)
excluded_names = set(excluded_names)
for check in self._constraint_methods:
if (not field_names.isdisjoint(check._constrains)
and excluded_names.isdisjoint(check._constrains)):
check(self)
@api.model
def default_get(self, fields_list):
""" default_get(fields_list) -> default_values
Return default values for the fields in ``fields_list``. Default
values are determined by the context, user defaults, and the model
itself.
        :param list fields_list: names of the fields whose defaults are requested
:return: a dictionary mapping field names to their corresponding default values,
if they have a default value.
:rtype: dict
.. note::
            Unrequested defaults won't be considered; there is no need to return a
            value for fields whose names are not in `fields_list`.
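        For instance, a ``default_`` context key takes precedence over
        ``ir.default`` entries and field defaults (illustrative value)::
            self.env['res.partner'].with_context(default_lang='en_US').default_get(['lang'])
            # -> {'lang': 'en_US'}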
"""
# trigger view init hook
self.view_init(fields_list)
defaults = {}
parent_fields = defaultdict(list)
ir_defaults = self.env['ir.default'].get_model_defaults(self._name)
for name in fields_list:
# 1. look up context
key = 'default_' + name
if key in self._context:
defaults[name] = self._context[key]
continue
# 2. look up ir.default
if name in ir_defaults:
defaults[name] = ir_defaults[name]
continue
field = self._fields.get(name)
# 3. look up field.default
if field and field.default:
defaults[name] = field.default(self)
continue
# 4. delegate to parent model
if field and field.inherited:
field = field.related_field
parent_fields[field.model_name].append(field.name)
# convert default values to the right format
#
# we explicitly avoid using _convert_to_write() for x2many fields,
# because the latter leaves values like [(Command.LINK, 2),
# (Command.LINK, 3)], which are not supported by the web client as
# default values; stepping through the cache allows to normalize
# such a list to [(Command.SET, 0, [2, 3])], which is properly
# supported by the web client
for fname, value in defaults.items():
if fname in self._fields:
field = self._fields[fname]
value = field.convert_to_cache(value, self, validate=False)
defaults[fname] = field.convert_to_write(value, self)
# add default values for inherited fields
for model, names in parent_fields.items():
defaults.update(self.env[model].default_get(names))
return defaults
@api.model
def fields_get_keys(self):
return list(self._fields)
@api.model
def _rec_name_fallback(self):
# if self._rec_name is set, it belongs to self._fields
return self._rec_name or 'id'
#
# Override this method if you need a window title that depends on the context
#
@api.model
def view_header_get(self, view_id=None, view_type='form'):
return False
@api.model
def user_has_groups(self, groups):
"""Return true if the user is member of at least one of the groups in
``groups``, and is not a member of any of the groups in ``groups``
preceded by ``!``. Typically used to resolve ``groups`` attribute in
view and model definitions.
:param str groups: comma-separated list of fully-qualified group
external IDs, e.g., ``base.group_user,base.group_system``,
optionally preceded by ``!``
:return: True if the current user is a member of one of the given groups
            not preceded by ``!`` and is not a member of any of the groups
preceded by ``!``
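        For instance, to require membership of the internal-user group while
        excluding system administrators (a sketch using standard base groups)::
            if self.user_has_groups('base.group_user,!base.group_system'):
                ...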
"""
from odoo.http import request
user = self.env.user
has_groups = []
not_has_groups = []
for group_ext_id in groups.split(','):
group_ext_id = group_ext_id.strip()
if group_ext_id[0] == '!':
not_has_groups.append(group_ext_id[1:])
else:
has_groups.append(group_ext_id)
for group_ext_id in not_has_groups:
if group_ext_id == 'base.group_no_one':
# check: the group_no_one is effective in debug mode only
if user.has_group(group_ext_id) and request and request.session.debug:
return False
else:
if user.has_group(group_ext_id):
return False
for group_ext_id in has_groups:
if group_ext_id == 'base.group_no_one':
# check: the group_no_one is effective in debug mode only
if user.has_group(group_ext_id) and request and request.session.debug:
return True
else:
if user.has_group(group_ext_id):
return True
return not has_groups
@api.model
def _get_default_form_view(self):
""" Generates a default single-line form view using all fields
of the current model.
:returns: a form view as an lxml document
:rtype: etree._Element
"""
group = E.group(col="4")
for fname, field in self._fields.items():
if field.automatic:
continue
elif field.type in ('one2many', 'many2many', 'text', 'html'):
group.append(E.newline())
group.append(E.field(name=fname, colspan="4"))
group.append(E.newline())
else:
group.append(E.field(name=fname))
group.append(E.separator())
return E.form(E.sheet(group, string=self._description))
@api.model
def _get_default_search_view(self):
""" Generates a single-field search view, based on _rec_name.
        :returns: a search view as an lxml document
:rtype: etree._Element
"""
element = E.field(name=self._rec_name_fallback())
return E.search(element, string=self._description)
@api.model
def _get_default_tree_view(self):
""" Generates a single-field tree view, based on _rec_name.
:returns: a tree view as an lxml document
:rtype: etree._Element
"""
element = E.field(name=self._rec_name_fallback())
return E.tree(element, string=self._description)
@api.model
def _get_default_pivot_view(self):
""" Generates an empty pivot view.
:returns: a pivot view as an lxml document
:rtype: etree._Element
"""
return E.pivot(string=self._description)
@api.model
def _get_default_kanban_view(self):
""" Generates a single-field kanban view, based on _rec_name.
:returns: a kanban view as an lxml document
:rtype: etree._Element
"""
field = E.field(name=self._rec_name_fallback())
content_div = E.div(field, {'class': "o_kanban_card_content"})
card_div = E.div(content_div, {'t-attf-class': "oe_kanban_card oe_kanban_global_click"})
kanban_box = E.t(card_div, {'t-name': "kanban-box"})
templates = E.templates(kanban_box)
return E.kanban(templates, string=self._description)
@api.model
def _get_default_graph_view(self):
""" Generates a single-field graph view, based on _rec_name.
:returns: a graph view as an lxml document
:rtype: etree._Element
"""
element = E.field(name=self._rec_name_fallback())
return E.graph(element, string=self._description)
@api.model
def _get_default_calendar_view(self):
""" Generates a default calendar view by trying to infer
calendar fields from a number of pre-set attribute names
:returns: a calendar view
:rtype: etree._Element
"""
def set_first_of(seq, in_, to):
"""Sets the first value of ``seq`` also found in ``in_`` to
the ``to`` attribute of the ``view`` being closed over.
Returns whether it's found a suitable value (and set it on
the attribute) or not
"""
for item in seq:
if item in in_:
view.set(to, item)
return True
return False
view = E.calendar(string=self._description)
view.append(E.field(name=self._rec_name_fallback()))
if not set_first_of([self._date_name, 'date', 'date_start', 'x_date', 'x_date_start'],
self._fields, 'date_start'):
raise UserError(_("Insufficient fields for Calendar View!"))
set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
self._fields, 'color')
if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
self._fields, 'date_stop'):
if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
self._fields, 'date_delay'):
raise UserError(_("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay", self._name))
return view
@api.model
def load_views(self, views, options=None):
""" Returns the fields_views of given views, along with the fields of
the current model, and optionally its filters for the given action.
:param views: list of [view_id, view_type]
:param options['toolbar']: True to include contextual actions when loading fields_views
:param options['load_filters']: True to return the model's filters
:param options['action_id']: id of the action to get the filters
:return: dictionary with fields_views, fields and optionally filters
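        For example, a sketch loading the default list and form views::
            self.env['res.partner'].load_views(
                [[False, 'list'], [False, 'form']], {'toolbar': True})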
"""
options = options or {}
result = {}
toolbar = options.get('toolbar')
result['fields_views'] = {
v_type: self.fields_view_get(v_id, v_type if v_type != 'list' else 'tree',
toolbar=toolbar if v_type != 'search' else False)
for [v_id, v_type] in views
}
result['fields'] = self.fields_get()
if options.get('load_filters'):
result['filters'] = self.env['ir.filters'].get_filters(self._name, options.get('action_id'))
return result
@api.model
def _fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
View = self.env['ir.ui.view'].sudo()
result = {
'model': self._name,
'field_parent': False,
}
# try to find a view_id if none provided
if not view_id:
# <view_type>_view_ref in context can be used to override the default view
view_ref_key = view_type + '_view_ref'
view_ref = self._context.get(view_ref_key)
if view_ref:
if '.' in view_ref:
module, view_ref = view_ref.split('.', 1)
query = "SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s"
self._cr.execute(query, (module, view_ref))
view_ref_res = self._cr.fetchone()
if view_ref_res:
view_id = view_ref_res[0]
else:
_logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
self._name)
if not view_id:
# otherwise try to find the lowest priority matching ir.ui.view
view_id = View.default_view(self._name, view_type)
if view_id:
# read the view with inherited views applied
view = View.browse(view_id)
result['arch'] = view.get_combined_arch()
result['name'] = view.name
result['type'] = view.type
result['view_id'] = view.id
result['field_parent'] = view.field_parent
result['base_model'] = view.model
else:
# fallback on default views methods if no ir.ui.view could be found
try:
arch_etree = getattr(self, '_get_default_%s_view' % view_type)()
result['arch'] = etree.tostring(arch_etree, encoding='unicode')
result['type'] = view_type
result['name'] = 'default'
except AttributeError:
raise UserError(_("No default view of type '%s' could be found !", view_type))
return result
@api.model
def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
""" fields_view_get([view_id | view_type='form'])
Get the detailed composition of the requested view like fields, model, view architecture
:param int view_id: id of the view or None
:param str view_type: type of the view to return if view_id is None ('form', 'tree', ...)
:param bool toolbar: true to include contextual actions
:param submenu: deprecated
:return: composition of the requested view (including inherited views and extensions)
:rtype: dict
:raise AttributeError:
* if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
* if some tag other than 'position' is found in parent view
        :raise Invalid ArchitectureError: if there is a view type other than form, tree, calendar, search, etc. defined on the structure
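        For example, a sketch reading the default form view of a model::
            res = self.env['res.partner'].fields_view_get(view_type='form')
            arch, fields = res['arch'], res['fields']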
"""
self.check_access_rights('read')
view = self.env['ir.ui.view'].sudo().browse(view_id)
# Get the view arch and all other attributes describing the composition of the view
result = self._fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
# Override context for postprocessing
if view_id and result.get('base_model', self._name) != self._name:
view = view.with_context(base_model_name=result['base_model'])
# Apply post processing, groups and modifiers etc...
xarch, xfields = view.postprocess_and_fields(etree.fromstring(result['arch']), model=self._name)
result['arch'] = xarch
result['fields'] = xfields
# Add related action information if asked
if toolbar:
vt = 'list' if view_type == 'tree' else view_type
bindings = self.env['ir.actions.actions'].get_bindings(self._name)
resreport = [action
for action in bindings['report']
if vt in (action.get('binding_view_types') or vt).split(',')]
resaction = [action
for action in bindings['action']
if vt in (action.get('binding_view_types') or vt).split(',')]
result['toolbar'] = {
'print': resreport,
'action': resaction,
}
return result
def get_formview_id(self, access_uid=None):
""" Return an view id to open the document ``self`` with. This method is
meant to be overridden in addons that want to give specific view ids
for example.
Optional access_uid holds the user that would access the form view
id different from the current environment user.
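        An override could look like this (``my_module.my_form_view`` is a
        hypothetical xml id)::
            def get_formview_id(self, access_uid=None):
                view = self.env.ref('my_module.my_form_view', raise_if_not_found=False)
                return view.id if view else super().get_formview_id(access_uid=access_uid)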
"""
return False
def get_formview_action(self, access_uid=None):
""" Return an action to open the document ``self``. This method is meant
to be overridden in addons that want to give specific view ids for
example.
An optional access_uid holds the user that will access the document
that could be different from the current user. """
view_id = self.sudo().get_formview_id(access_uid=access_uid)
return {
'type': 'ir.actions.act_window',
'res_model': self._name,
'view_type': 'form',
'view_mode': 'form',
'views': [(view_id, 'form')],
'target': 'current',
'res_id': self.id,
'context': dict(self._context),
}
def get_access_action(self, access_uid=None):
""" Return an action to open the document. This method is meant to be
overridden in addons that want to give specific access to the document.
By default it opens the formview of the document.
An optional access_uid holds the user that will access the document
that could be different from the current user.
"""
return self[0].get_formview_action(access_uid=access_uid)
@api.model
def search_count(self, args):
""" search_count(args) -> int
Returns the number of records in the current model matching :ref:`the
provided domain <reference/orm/domains>`.
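        For example::
            self.env['res.partner'].search_count([('is_company', '=', True)])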
"""
res = self.search(args, count=True)
return res if isinstance(res, int) else len(res)
@api.model
@api.returns('self',
upgrade=lambda self, value, args, offset=0, limit=None, order=None, count=False: value if count else self.browse(value),
downgrade=lambda self, value, args, offset=0, limit=None, order=None, count=False: value if count else value.ids)
def search(self, args, offset=0, limit=None, order=None, count=False):
""" search(args[, offset=0][, limit=None][, order=None][, count=False])
Searches for records based on the ``args``
:ref:`search domain <reference/orm/domains>`.
:param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
list to match all records.
:param int offset: number of results to ignore (default: none)
:param int limit: maximum number of records to return (default: all)
:param str order: sort string
:param bool count: if True, only counts and returns the number of matching records (default: False)
:returns: at most ``limit`` records matching the search criteria
:raise AccessError: * if user tries to bypass access rules for read on the requested object.
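        For example, a sketch fetching the first ten companies by name::
            companies = self.env['res.partner'].search(
                [('is_company', '=', True)], order='name', limit=10)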
"""
res = self._search(args, offset=offset, limit=limit, order=order, count=count)
return res if count else self.browse(res)
#
# display_name, name_get, name_create, name_search
#
@api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
def _compute_display_name(self):
"""Compute the value of the `display_name` field.
In general `display_name` is equal to calling `name_get()[0][1]`.
In that case, it is recommended to use `display_name` to uniformize the
code and to potentially take advantage of prefetch when applicable.
However some models might override this method. For them, the behavior
might differ, and it is important to select which of `display_name` or
`name_get()[0][1]` to call depending on the desired result.
"""
names = dict(self.name_get())
for record in self:
record.display_name = names.get(record.id, False)
def name_get(self):
""" name_get() -> [(id, name), ...]
Returns a textual representation for the records in ``self``.
By default this is the value of the ``display_name`` field.
        :return: list of pairs ``(id, text_repr)`` for each record
:rtype: list(tuple)
"""
result = []
name = self._rec_name
if name in self._fields:
convert = self._fields[name].convert_to_display_name
for record in self:
result.append((record.id, convert(record[name], record)))
else:
for record in self:
result.append((record.id, "%s,%s" % (record._name, record.id)))
return result
@api.model
def name_create(self, name):
""" name_create(name) -> record
Create a new record by calling :meth:`~.create` with only one value
provided: the display name of the new record.
The new record will be initialized with any default values
applicable to this model, or provided through the context. The usual
behavior of :meth:`~.create` applies.
:param name: display name of the record to create
:rtype: tuple
:return: the :meth:`~.name_get` pair value of the created record
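        For example (illustrative id)::
            self.env['res.partner'].name_create('New Partner')
            # -> (42, 'New Partner')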
"""
if self._rec_name:
record = self.create({self._rec_name: name})
return record.name_get()[0]
else:
_logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
return False
@api.model
def name_search(self, name='', args=None, operator='ilike', limit=100):
""" name_search(name='', args=None, operator='ilike', limit=100) -> records
Search for records that have a display name matching the given
``name`` pattern when compared with the given ``operator``, while also
matching the optional search domain (``args``).
This is used for example to provide suggestions based on a partial
        value for a relational field. It may be seen as the inverse
        function of :meth:`~.name_get`, but it is not guaranteed to be.
This method is equivalent to calling :meth:`~.search` with a search
domain based on ``display_name`` and then :meth:`~.name_get` on the
result of the search.
:param str name: the name pattern to match
:param list args: optional search domain (see :meth:`~.search` for
syntax), specifying further restrictions
:param str operator: domain operator for matching ``name``, such as
``'like'`` or ``'='``.
:param int limit: optional max number of records to return
:rtype: list
:return: list of pairs ``(id, text_repr)`` for all matching records.
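        For example (illustrative ids)::
            self.env['res.country'].name_search('bel', limit=5)
            # -> [(20, 'Belgium'), (21, 'Belize')]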
"""
ids = self._name_search(name, args, operator, limit=limit)
return self.browse(ids).sudo().name_get()
@api.model
def _name_search(self, name='', args=None, operator='ilike', limit=100, name_get_uid=None):
""" _name_search(name='', args=None, operator='ilike', limit=100, name_get_uid=None) -> ids
Private implementation of name_search, allows passing a dedicated user
for the name_get part to solve some access rights issues.
"""
args = list(args or [])
# optimize out the default criterion of ``ilike ''`` that matches everything
if not self._rec_name:
_logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
elif not (name == '' and operator == 'ilike'):
args += [(self._rec_name, operator, name)]
return self._search(args, limit=limit, access_rights_uid=name_get_uid)
@api.model
def _add_missing_default_values(self, values):
# avoid overriding inherited values when parent is set
avoid_models = set()
def collect_models_to_avoid(model):
for parent_mname, parent_fname in model._inherits.items():
if parent_fname in values:
avoid_models.add(parent_mname)
else:
# manage the case where an ancestor parent field is set
collect_models_to_avoid(self.env[parent_mname])
collect_models_to_avoid(self)
def avoid(field):
# check whether the field is inherited from one of avoid_models
if avoid_models:
while field.inherited:
field = field.related_field
if field.model_name in avoid_models:
return True
return False
# compute missing fields
missing_defaults = {
name
for name, field in self._fields.items()
if name not in values
if not avoid(field)
}
if not missing_defaults:
return values
# override defaults with the provided values, never allow the other way around
defaults = self.default_get(list(missing_defaults))
for name, value in defaults.items():
if self._fields[name].type == 'many2many' and value and isinstance(value[0], int):
# convert a list of ids into a list of commands
defaults[name] = [Command.set(value)]
elif self._fields[name].type == 'one2many' and value and isinstance(value[0], dict):
# convert a list of dicts into a list of commands
defaults[name] = [Command.create(x) for x in value]
defaults.update(values)
return defaults
@classmethod
def clear_caches(cls):
""" Clear the caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi``.
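        For example::
            self.env['ir.model.data'].clear_caches()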
"""
cls.pool._clear_cache()
@api.model
def _read_group_expand_full(self, groups, domain, order):
"""Extend the group to include all target records by default."""
return groups.search([], order=order)
@api.model
def _read_group_fill_results(self, domain, groupby, remaining_groupbys,
aggregated_fields, count_field,
read_group_result, read_group_order=None):
"""Helper method for filling in empty groups for all possible values of
the field being grouped by"""
field = self._fields[groupby]
if not field.group_expand:
return read_group_result
# field.group_expand is a callable or the name of a method, that returns
# the groups that we want to display for this field, in the form of a
# recordset or a list of values (depending on the type of the field).
# This is useful to implement kanban views for instance, where some
# columns should be displayed even if they don't contain any record.
group_expand = field.group_expand
if isinstance(group_expand, str):
group_expand = getattr(type(self), group_expand)
assert callable(group_expand)
# determine all groups that should be returned
values = [line[groupby] for line in read_group_result if line[groupby]]
if field.relational:
# groups is a recordset; determine order on groups's model
groups = self.env[field.comodel_name].browse([value[0] for value in values])
order = groups._order
if read_group_order == groupby + ' desc':
order = tools.reverse_order(order)
groups = group_expand(self, groups, domain, order)
groups = groups.sudo()
values = lazy_name_get(groups)
value2key = lambda value: value and value[0]
else:
# groups is a list of values
values = group_expand(self, values, domain, None)
if read_group_order == groupby + ' desc':
values.reverse()
value2key = lambda value: value
# Merge the current results (list of dicts) with all groups. Determine
# the global order of results groups, which is supposed to be in the
# same order as read_group_result (in the case of a many2one field).
result = OrderedDict((value2key(value), {}) for value in values)
# fill in results from read_group_result
for line in read_group_result:
key = value2key(line[groupby])
if not result.get(key):
result[key] = line
else:
result[key][count_field] = line[count_field]
# fill in missing results from all groups
for value in values:
key = value2key(value)
if not result[key]:
line = dict.fromkeys(aggregated_fields, False)
line[groupby] = value
line[groupby + '_count'] = 0
line['__domain'] = [(groupby, '=', key)] + domain
if remaining_groupbys:
line['__context'] = {'group_by': remaining_groupbys}
result[key] = line
# add folding information if present
if field.relational and groups._fold_name in groups._fields:
fold = {group.id: group[groups._fold_name]
for group in groups.browse([key for key in result if key])}
for key, line in result.items():
line['__fold'] = fold.get(key, False)
return list(result.values())
@api.model
def _read_group_fill_temporal(self, data, groupby, aggregated_fields, annotated_groupbys,
fill_from=False, fill_to=False, min_groups=False):
"""Helper method for filling date/datetime 'holes' in a result set.
We are in a use case where data are grouped by a date field (typically
months but it could be any other interval) and displayed in a chart.
Assume we group records by month, and we only have data for June,
September and December. By default, plotting the result gives something
like:
___
___ | |
| | ___ | |
|___||___||___|
Jun Sep Dec
The problem is that December data immediately follow September data,
which is misleading for the user. Adding explicit zeroes for missing
data gives something like:
___
___ | |
| | ___ | |
|___| ___ ___ |___| ___ ___ |___|
Jun Jul Aug Sep Oct Nov Dec
To customize this output, the context key "fill_temporal" can be used
        under its dictionary format, which has 3 attributes: fill_from,
fill_to, min_groups (see params of this function)
Fill between bounds:
Using either `fill_from` and/or `fill_to` attributes, we can further
specify that at least a certain date range should be returned as
contiguous groups. Any group outside those bounds will not be removed,
but the filling will only occur between the specified bounds. When not
specified, existing groups will be used as bounds, if applicable.
By specifying such bounds, we can get empty groups before/after any
group with data.
If we want to fill groups only between August (fill_from)
and October (fill_to):
___
___ | |
| | ___ | |
|___| ___ |___| ___ |___|
Jun Aug Sep Oct Dec
We still get June and December. To filter them out, we should match
`fill_from` and `fill_to` with the domain e.g. ['&',
('date_field', '>=', 'YYYY-08-01'),
('date_field', '<', 'YYYY-11-01')]:
___
___ |___| ___
Aug Sep Oct
Minimal filling amount:
Using `min_groups`, we can specify that we want at least that amount of
contiguous groups. This amount is guaranteed to be provided from
`fill_from` if specified, or from the lowest existing group otherwise.
This amount is not restricted by `fill_to`. If there is an existing
group before `fill_from`, `fill_from` is still used as the starting
group for min_groups, because the filling does not apply on that
existing group. If neither `fill_from` nor `fill_to` is specified, and
there is no existing group, no group will be returned.
If we set min_groups = 4:
___
___ |___| ___ ___
Aug Sep Oct Nov
:param list data: the data containing groups
:param list groupby: name of the first group by
:param list aggregated_fields: list of aggregated fields in the query
:param str fill_from: (inclusive) string representation of a
date/datetime, start bound of the fill_temporal range
formats: date -> %Y-%m-%d, datetime -> %Y-%m-%d %H:%M:%S
:param str fill_to: (inclusive) string representation of a
date/datetime, end bound of the fill_temporal range
formats: date -> %Y-%m-%d, datetime -> %Y-%m-%d %H:%M:%S
:param int min_groups: minimal amount of required groups for the
fill_temporal range (should be >= 1)
:rtype: list
:return: list
"""
first_a_gby = annotated_groupbys[0]
if first_a_gby['type'] not in ('date', 'datetime'):
return data
interval = first_a_gby['interval']
granularity = first_a_gby['granularity']
tz = pytz.timezone(self._context['tz']) if first_a_gby["tz_convert"] else False
groupby_name = groupby[0]
# existing non null datetimes
existing = [d[groupby_name] for d in data if d[groupby_name]] or [None]
# assumption: existing data is sorted by field 'groupby_name'
existing_from, existing_to = existing[0], existing[-1]
if fill_from:
fill_from = date_utils.start_of(odoo.fields.Datetime.to_datetime(fill_from), granularity)
if tz:
fill_from = tz.localize(fill_from)
elif existing_from:
fill_from = existing_from
if fill_to:
fill_to = date_utils.start_of(odoo.fields.Datetime.to_datetime(fill_to), granularity)
if tz:
fill_to = tz.localize(fill_to)
elif existing_to:
fill_to = existing_to
if not fill_to and fill_from:
fill_to = fill_from
if not fill_from and fill_to:
fill_from = fill_to
if not fill_from and not fill_to:
return data
if min_groups > 0:
fill_to = max(fill_to, fill_from + (min_groups - 1) * interval)
if fill_to < fill_from:
return data
required_dates = date_utils.date_range(fill_from, fill_to, interval)
if existing[0] is None:
existing = list(required_dates)
else:
existing = sorted(set().union(existing, required_dates))
empty_item = {'id': False, (groupby_name.split(':')[0] + '_count'): 0}
empty_item.update({key: False for key in aggregated_fields})
empty_item.update({key: False for key in [group['groupby'] for group in annotated_groupbys[1:]]})
grouped_data = collections.defaultdict(list)
for d in data:
grouped_data[d[groupby_name]].append(d)
result = []
for dt in existing:
result.extend(grouped_data[dt] or [dict(empty_item, **{groupby_name: dt})])
if False in grouped_data:
result.extend(grouped_data[False])
return result
@api.model
def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
"""
Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
        to the query if the order should be computed against an m2o field.
:param orderby: the orderby definition in the form "%(field)s %(order)s"
:param aggregated_fields: list of aggregated fields in the query
:param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
            These dictionaries contain the qualified name of each groupby
(fully qualified SQL name for the corresponding field),
and the (non raw) field name.
:param osv.Query query: the query under construction
:return: (groupby_terms, orderby_terms)
"""
orderby_terms = []
groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
if not orderby:
return groupby_terms, orderby_terms
self._check_qorder(orderby)
# when a field is grouped as 'foo:bar', both orderby='foo' and
# orderby='foo:bar' generate the clause 'ORDER BY "foo:bar"'
groupby_fields = {
gb[key]: gb['groupby']
for gb in annotated_groupbys
for key in ('field', 'groupby')
}
for order_part in orderby.split(','):
order_split = order_part.split()
order_field = order_split[0]
if order_field == 'id' or order_field in groupby_fields:
if self._fields[order_field.split(':')[0]].type == 'many2one':
order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
if order_clause:
orderby_terms.append(order_clause)
groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
else:
order_split[0] = '"%s"' % groupby_fields.get(order_field, order_field)
orderby_terms.append(' '.join(order_split))
elif order_field in aggregated_fields:
order_split[0] = '"%s"' % order_field
orderby_terms.append(' '.join(order_split))
elif order_field not in self._fields:
raise ValueError("Invalid field %r on model %r" % (order_field, self._name))
else:
# Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
_logger.warning('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
self._name, order_part)
return groupby_terms, orderby_terms
@api.model
def _read_group_process_groupby(self, gb, query):
"""
Helper method to collect important information about groupbys: raw
field name, type, time information, qualified name, ...
"""
split = gb.split(':')
field = self._fields.get(split[0])
if not field:
raise ValueError("Invalid field %r on model %r" % (split[0], self._name))
field_type = field.type
gb_function = split[1] if len(split) == 2 else None
temporal = field_type in ('date', 'datetime')
tz_convert = field_type == 'datetime' and self._context.get('tz') in pytz.all_timezones
qualified_field = self._inherits_join_calc(self._table, split[0], query)
if temporal:
display_formats = {
# Careful with week/year formats:
# - yyyy (lower) must always be used, *except* for week+year formats
# - YYYY (upper) must always be used for week+year format
# e.g. 2006-01-01 is W52 2005 in some locales (de_DE),
# and W1 2006 for others
#
# Mixing both formats, e.g. 'MMM YYYY' would yield wrong results,
# such as 2006-01-01 being formatted as "January 2005" in some locales.
# Cfr: http://babel.pocoo.org/en/latest/dates.html#date-fields
'hour': 'hh:00 dd MMM',
'day': 'dd MMM yyyy', # yyyy = normal year
'week': "'W'w YYYY", # w YYYY = ISO week-year
'month': 'MMMM yyyy',
'quarter': 'QQQ yyyy',
'year': 'yyyy',
}
time_intervals = {
'hour': dateutil.relativedelta.relativedelta(hours=1),
'day': dateutil.relativedelta.relativedelta(days=1),
'week': datetime.timedelta(days=7),
'month': dateutil.relativedelta.relativedelta(months=1),
'quarter': dateutil.relativedelta.relativedelta(months=3),
'year': dateutil.relativedelta.relativedelta(years=1)
}
if tz_convert:
qualified_field = "timezone('%s', timezone('UTC',%s))" % (self._context.get('tz', 'UTC'), qualified_field)
qualified_field = "date_trunc('%s', %s::timestamp)" % (gb_function or 'month', qualified_field)
if field_type == 'boolean':
qualified_field = "coalesce(%s,false)" % qualified_field
return {
'field': split[0],
'groupby': gb,
'type': field_type,
'display_format': display_formats[gb_function or 'month'] if temporal else None,
'interval': time_intervals[gb_function or 'month'] if temporal else None,
'granularity': gb_function or 'month' if temporal else None,
'tz_convert': tz_convert,
'qualified_field': qualified_field,
}
@api.model
def _read_group_prepare_data(self, key, value, groupby_dict):
"""
Helper method to sanitize the data received by read_group. The None
        values are converted to False, and the date/datetime values are
        formatted and corrected according to the timezones.
"""
value = False if value is None else value
gb = groupby_dict.get(key)
if gb and gb['type'] in ('date', 'datetime') and value:
if isinstance(value, str):
dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
value = datetime.datetime.strptime(value, dt_format)
if gb['tz_convert']:
value = pytz.timezone(self._context['tz']).localize(value)
return value
@api.model
def _read_group_format_result(self, data, annotated_groupbys, groupby, domain):
"""
Helper method to format the data contained in the dictionary data by
adding the domain corresponding to its values, the groupbys in the
context and by properly formatting the date/datetime values.
:param data: a single group
:param annotated_groupbys: expanded grouping metainformation
:param groupby: original grouping metainformation
:param domain: original domain for read_group
"""
sections = []
for gb in annotated_groupbys:
ftype = gb['type']
value = data[gb['groupby']]
# full domain for this groupby spec
d = None
if value:
if ftype in ['many2one', 'many2many']:
value = value[0]
elif ftype in ('date', 'datetime'):
locale = get_lang(self.env).code
fmt = DEFAULT_SERVER_DATETIME_FORMAT if ftype == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
tzinfo = None
range_start = value
range_end = value + gb['interval']
# value from postgres is in local tz (so range is
# considered in local tz e.g. "day" is [00:00, 00:00[
                    # local rather than UTC which could be [11:00, 11:00[
# local) but domain and raw value should be in UTC
if gb['tz_convert']:
tzinfo = range_start.tzinfo
range_start = range_start.astimezone(pytz.utc)
# take into account possible hour change between start and end
range_end = tzinfo.localize(range_end.replace(tzinfo=None))
range_end = range_end.astimezone(pytz.utc)
range_start = range_start.strftime(fmt)
range_end = range_end.strftime(fmt)
if ftype == 'datetime':
label = babel.dates.format_datetime(
value, format=gb['display_format'],
tzinfo=tzinfo, locale=locale
)
else:
label = babel.dates.format_date(
value, format=gb['display_format'],
locale=locale
)
data[gb['groupby']] = ('%s/%s' % (range_start, range_end), label)
d = [
'&',
(gb['field'], '>=', range_start),
(gb['field'], '<', range_end),
]
if d is None:
d = [(gb['field'], '=', value)]
sections.append(d)
sections.append(domain)
data['__domain'] = expression.AND(sections)
if len(groupby) - len(annotated_groupbys) >= 1:
data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
del data['id']
return data
@api.model
def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
"""Get the list of records in list view grouped by the given ``groupby`` fields.
:param list domain: :ref:`A search domain <reference/orm/domains>`. Use an empty
list to match all records.
:param list fields: list of fields present in the list view specified on the object.
Each element is either 'field' (field name, using the default aggregation),
or 'field:agg' (aggregate field with aggregation function 'agg'),
or 'name:agg(field)' (aggregate field with 'agg' and return it as 'name').
The possible aggregation functions are the ones provided by PostgreSQL
(https://www.postgresql.org/docs/current/static/functions-aggregate.html)
and 'count_distinct', with the expected meaning.
:param list groupby: list of groupby descriptions by which the records will be grouped.
A groupby description is either a field (then it will be grouped by that field)
or a string 'field:groupby_function'. Right now, the only functions supported
are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
date/datetime fields.
:param int offset: optional number of records to skip
:param int limit: optional max number of records to return
:param str orderby: optional ``order by`` specification, for
overriding the natural sort ordering of the
groups, see also :py:meth:`~osv.osv.osv.search`
(supported only for many2one fields currently)
:param bool lazy: if true, the results are only grouped by the first groupby and the
remaining groupbys are put in the __context key. If false, all the groupbys are
done in one call.
:return: list of dictionaries(one dictionary for each record) containing:
* the values of fields grouped by the fields in ``groupby`` argument
* __domain: list of tuples specifying the search criteria
* __context: dictionary with argument like ``groupby``
* __range: (date/datetime only) dictionary with field names as keys mapping to
a dictionary with keys: "from" (inclusive) and "to" (exclusive)
mapping to a string representation of the temporal bounds of the group
        :rtype: [{'field_name_1': value, ...}, ...]
:raise AccessError: * if user has no read rights on the requested object
* if user tries to bypass access rules for read on the requested object
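
        An illustrative call, on a hypothetical model with a ``partner_id``
        many2one and an ``amount`` numeric field::

            groups = self.env['some.model'].read_group(
                [('state', '=', 'done')],   # domain
                ['amount:sum'],             # fields (aggregates)
                ['partner_id'],             # groupby
            )
            # each group roughly looks like:
            # {'partner_id': (7, 'Azure Interior'), 'partner_id_count': 3,
            #  'amount': 42.0, '__domain': [...]}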
"""
result = self._read_group_raw(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)
groupby = [groupby] if isinstance(groupby, str) else list(OrderedSet(groupby))
dt = [
f for f in groupby
if self._fields[f.split(':')[0]].type in ('date', 'datetime') # e.g. 'date:month'
]
# iterate on all results and replace the "full" date/datetime value (<=> group[df])
# which is a tuple (range, label) by just the formatted label, in-place.
# we store the range under another format, by adding a new __range key for each
# group, mapping to a sub-dictionary: {field: {from: #inclusive#, to: #exclusive#}}
for group in result:
if dt:
group["__range"] = {}
for df in dt:
# could group on a date(time) field which is empty in some
# records, in which case as with m2o the _raw value will be
# `False` instead of a (value, label) pair. In that case,
# leave the `False` value alone
field_name = df.split(':')[0]
if group.get(df):
range_from, range_to = group[df][0].split('/')
# /!\ could break if DEFAULT_SERVER_DATE_FORMAT allows '/' characters
group["__range"][field_name] = {
"from": range_from,
"to": range_to
}
group[df] = group[df][1]
else:
group["__range"][field_name] = False
return result
@api.model
def _read_group_raw(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
self.check_access_rights('read')
query = self._where_calc(domain)
fields = fields or [f.name for f in self._fields.values() if f.store]
groupby = [groupby] if isinstance(groupby, str) else list(OrderedSet(groupby))
groupby_list = groupby[:1] if lazy else groupby
annotated_groupbys = [self._read_group_process_groupby(gb, query) for gb in groupby_list]
groupby_fields = [g['field'] for g in annotated_groupbys]
order = orderby or ','.join([g for g in groupby_list])
groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
self._apply_ir_rules(query, 'read')
for gb in groupby_fields:
if gb not in self._fields:
raise UserError(_("Unknown field %r in 'groupby'") % gb)
if not self._fields[gb].base_field.groupable:
raise UserError(_(
"Field %s is not a stored field, only stored fields (regular or "
"many2many) are valid for the 'groupby' parameter", self._fields[gb],
))
aggregated_fields = []
select_terms = []
fnames = [] # list of fields to flush
for fspec in fields:
if fspec == 'sequence':
continue
if fspec == '__count':
# the web client sometimes adds this pseudo-field in the list
continue
match = regex_field_agg.match(fspec)
if not match:
raise UserError(_("Invalid field specification %r.", fspec))
name, func, fname = match.groups()
if func:
# we have either 'name:func' or 'name:func(fname)'
fname = fname or name
field = self._fields.get(fname)
if not field:
raise ValueError("Invalid field %r on model %r" % (fname, self._name))
if not (field.base_field.store and field.base_field.column_type):
raise UserError(_("Cannot aggregate field %r.", fname))
if func not in VALID_AGGREGATE_FUNCTIONS:
raise UserError(_("Invalid aggregation function %r.", func))
else:
# we have 'name', retrieve the aggregator on the field
field = self._fields.get(name)
if not field:
raise ValueError("Invalid field %r on model %r" % (name, self._name))
if not (field.base_field.store and
field.base_field.column_type and field.group_operator):
continue
func, fname = field.group_operator, name
fnames.append(fname)
if fname in groupby_fields:
continue
if name in aggregated_fields:
raise UserError(_("Output name %r is used twice.", name))
aggregated_fields.append(name)
expr = self._inherits_join_calc(self._table, fname, query)
if func.lower() == 'count_distinct':
term = 'COUNT(DISTINCT %s) AS "%s"' % (expr, name)
else:
term = '%s(%s) AS "%s"' % (func, expr, name)
select_terms.append(term)
for gb in annotated_groupbys:
select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
self._flush_search(domain, fields=fnames + groupby_fields)
groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
from_clause, where_clause, where_clause_params = query.get_sql()
if lazy and (len(groupby_fields) >= 2 or not self._context.get('group_by_no_leaf')):
count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
else:
count_field = '_'
count_field += '_count'
prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
query = """
SELECT min("%(table)s".id) AS id, count("%(table)s".id) AS "%(count_field)s" %(extra_fields)s
FROM %(from)s
%(where)s
%(groupby)s
%(orderby)s
%(limit)s
%(offset)s
""" % {
'table': self._table,
'count_field': count_field,
'extra_fields': prefix_terms(',', select_terms),
'from': from_clause,
'where': prefix_term('WHERE', where_clause),
'groupby': prefix_terms('GROUP BY', groupby_terms),
'orderby': prefix_terms('ORDER BY', orderby_terms),
'limit': prefix_term('LIMIT', int(limit) if limit else None),
'offset': prefix_term('OFFSET', int(offset) if limit else None),
}
self._cr.execute(query, where_clause_params)
fetched_data = self._cr.dictfetchall()
if not groupby_fields:
return fetched_data
self._read_group_resolve_many2x_fields(fetched_data, annotated_groupbys)
data = [{k: self._read_group_prepare_data(k, v, groupby_dict) for k, v in r.items()} for r in fetched_data]
fill_temporal = self.env.context.get('fill_temporal')
if (data and fill_temporal) or isinstance(fill_temporal, dict):
# fill_temporal = {} is equivalent to fill_temporal = True
# if fill_temporal is a dictionary and there is no data, there is a chance that we
# want to display empty columns anyway, so we should apply the fill_temporal logic
if not isinstance(fill_temporal, dict):
fill_temporal = {}
data = self._read_group_fill_temporal(data, groupby, aggregated_fields,
annotated_groupbys, **fill_temporal)
result = [self._read_group_format_result(d, annotated_groupbys, groupby, domain) for d in data]
if lazy:
            # Right now, read_group only fills results in lazy mode (by default).
# If you need to have the empty groups in 'eager' mode, then the
# method _read_group_fill_results need to be completely reimplemented
# in a sane way
result = self._read_group_fill_results(
domain, groupby_fields[0], groupby[len(annotated_groupbys):],
aggregated_fields, count_field, result, read_group_order=order,
)
return result
def _read_group_resolve_many2x_fields(self, data, fields):
many2xfields = {field['field'] for field in fields if field['type'] in ['many2one', 'many2many']}
for field in many2xfields:
ids_set = {d[field] for d in data if d[field]}
m2x_records = self.env[self._fields[field].comodel_name].browse(ids_set)
data_dict = dict(lazy_name_get(m2x_records.sudo()))
for d in data:
d[field] = (d[field], data_dict[d[field]]) if d[field] else False
def _inherits_join_add(self, current_model, parent_model_name, query):
"""
Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
:param current_model: current model object
:param parent_model_name: name of the parent model for which the clauses should be added
:param query: query object on which the JOIN should be added
"""
inherits_field = current_model._inherits[parent_model_name]
parent_model = self.env[parent_model_name]
parent_alias = query.left_join(
current_model._table, inherits_field, parent_model._table, 'id', inherits_field,
)
return parent_alias
@api.model
def _inherits_join_calc(self, alias, fname, query):
"""
Adds missing table select and join clause(s) to ``query`` for reaching
the field coming from an '_inherits' parent table (no duplicates).
:param alias: name of the initial SQL alias
:param fname: name of inherited field to reach
:param query: query object on which the JOIN should be added
:return: qualified name of field, to be used in SELECT clause
"""
# INVARIANT: alias is the SQL alias of model._table in query
model, field = self, self._fields[fname]
while field.inherited:
# retrieve the parent model where field is inherited from
parent_model = self.env[field.related_field.model_name]
parent_fname = field.related.split('.')[0]
# JOIN parent_model._table AS parent_alias ON alias.parent_fname = parent_alias.id
parent_alias = query.left_join(
alias, parent_fname, parent_model._table, 'id', parent_fname,
)
model, alias, field = parent_model, parent_alias, field.related_field
if field.type == 'many2many':
# special case for many2many fields: prepare a query on the comodel
# in order to reuse the mechanism _apply_ir_rules, then inject the
# query as an extra condition of the left join
comodel = self.env[field.comodel_name]
subquery = Query(self.env.cr, comodel._table)
comodel._apply_ir_rules(subquery)
# add the extra join condition only if there is an actual subquery
extra, extra_params = None, ()
if subquery.where_clause:
subquery_str, extra_params = subquery.select()
extra = '"{rhs}"."%s" IN (%s)' % (field.column2, subquery_str)
# LEFT JOIN field_relation ON
# alias.id = field_relation.field_column1
# AND field_relation.field_column2 IN (subquery)
rel_alias = query.left_join(
alias, 'id', field.relation, field.column1, field.name,
extra=extra, extra_params=extra_params,
)
return '"%s"."%s"' % (rel_alias, field.column2)
elif field.translate is True:
# handle the case where the field is translated
return model._generate_translated_field(alias, fname, query)
else:
return '"%s"."%s"' % (alias, fname)
def _parent_store_compute(self):
""" Compute parent_path field from scratch. """
if not self._parent_store:
return
# Each record is associated to a string 'parent_path', that represents
# the path from the record's root node to the record. The path is made
# of the node ids suffixed with a slash (see example below). The nodes
# in the subtree of record are the ones where 'parent_path' starts with
# the 'parent_path' of record.
#
# a node | id | parent_path
# / \ a | 42 | 42/
# ... b b | 63 | 42/63/
# / \ c | 84 | 42/63/84/
# c d d | 85 | 42/63/85/
#
# Note: the final '/' is necessary to match subtrees correctly: '42/63'
# is a prefix of '42/630', but '42/63/' is not a prefix of '42/630/'.
_logger.info('Computing parent_path for table %s...', self._table)
query = """
WITH RECURSIVE __parent_store_compute(id, parent_path) AS (
SELECT row.id, concat(row.id, '/')
FROM {table} row
WHERE row.{parent} IS NULL
UNION
SELECT row.id, concat(comp.parent_path, row.id, '/')
FROM {table} row, __parent_store_compute comp
WHERE row.{parent} = comp.id
)
UPDATE {table} row SET parent_path = comp.parent_path
FROM __parent_store_compute comp
WHERE row.id = comp.id
""".format(table=self._table, parent=self._parent_name)
self.env.cr.execute(query)
self.invalidate_cache(['parent_path'])
return True
def _check_removed_columns(self, log=False):
if self._abstract:
return
# iterate on the database columns to drop the NOT NULL constraints of
# fields which were required but have been removed (or will be added by
# another module)
cr = self._cr
cols = [name for name, field in self._fields.items()
if field.store and field.column_type]
cr.execute("SELECT a.attname, a.attnotnull"
" FROM pg_class c, pg_attribute a"
" WHERE c.relname=%s"
" AND c.oid=a.attrelid"
" AND a.attisdropped=%s"
" AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
" AND a.attname NOT IN %s", (self._table, False, tuple(cols))),
for row in cr.dictfetchall():
if log:
_logger.debug("column %s is in the table %s but not in the corresponding object %s",
row['attname'], self._table, self._name)
if row['attnotnull']:
tools.drop_not_null(cr, self._table, row['attname'])
def _init_column(self, column_name):
""" Initialize the value of the given column for existing rows. """
# get the default value; ideally, we should use default_get(), but it
# fails due to ir.default not being ready
field = self._fields[column_name]
if field.default:
value = field.default(self)
value = field.convert_to_write(value, self)
value = field.convert_to_column(value, self)
else:
value = None
# Write value if non-NULL, except for booleans for which False means
# the same as NULL - this saves us an expensive query on large tables.
necessary = (value is not None) if field.type != 'boolean' else value
if necessary:
_logger.debug("Table '%s': setting default value of new column %s to %r",
self._table, column_name, value)
query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" IS NULL' % (
self._table, column_name, field.column_format, column_name)
self._cr.execute(query, (value,))
@ormcache()
def _table_has_rows(self):
""" Return whether the model's table has rows. This method should only
be used when updating the database schema (:meth:`~._auto_init`).
"""
self.env.cr.execute('SELECT 1 FROM "%s" LIMIT 1' % self._table)
return self.env.cr.rowcount
def _auto_init(self):
""" Initialize the database schema of ``self``:
- create the corresponding table,
- create/update the necessary columns/tables for fields,
- initialize new columns on existing rows,
- add the SQL constraints given on the model,
- add the indexes on indexed fields,
Also prepare post-init stuff to:
- add foreign key constraints,
- reflect models, fields, relations and constraints,
- mark fields to recompute on existing records.
Note: you should not override this method. Instead, you can modify
the model's database schema by overriding method :meth:`~.init`,
which is called right after this one.
"""
raise_on_invalid_object_name(self._name)
# This prevents anything called by this method (in particular default
# values) from prefetching a field for which the corresponding column
# has not been added in database yet!
self = self.with_context(prefetch_fields=False)
cr = self._cr
update_custom_fields = self._context.get('update_custom_fields', False)
must_create_table = not tools.table_exists(cr, self._table)
parent_path_compute = False
if self._auto:
if must_create_table:
def make_type(field):
return field.column_type[1] + (" NOT NULL" if field.required else "")
tools.create_model_table(cr, self._table, self._description, [
(name, make_type(field), field.string)
for name, field in self._fields.items()
if name != 'id' and field.store and field.column_type
])
if self._parent_store:
if not tools.column_exists(cr, self._table, 'parent_path'):
self._create_parent_columns()
parent_path_compute = True
if not must_create_table:
self._check_removed_columns(log=False)
# update the database schema for fields
columns = tools.table_columns(cr, self._table)
fields_to_compute = []
for field in self._fields.values():
if not field.store:
continue
if field.manual and not update_custom_fields:
continue # don't update custom fields
new = field.update_db(self, columns)
if new and field.compute:
fields_to_compute.append(field.name)
if fields_to_compute:
@self.pool.post_init
def mark_fields_to_compute():
recs = self.with_context(active_test=False).search([], order='id')
if not recs:
return
for field in fields_to_compute:
_logger.info("Storing computed values of %s.%s", recs._name, field)
self.env.add_to_compute(recs._fields[field], recs)
if self._auto:
self._add_sql_constraints()
if must_create_table:
self._execute_sql()
if parent_path_compute:
self._parent_store_compute()
def init(self):
""" This method is called after :meth:`~._auto_init`, and may be
overridden to create or modify a model's database schema.
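
        An illustrative override, adding an index that the field declarations
        cannot express (table, column and index names are hypothetical)::

            def init(self):
                self._cr.execute(
                    "CREATE INDEX IF NOT EXISTS foo_bar_partner_active_idx "
                    "ON foo_bar (partner_id) WHERE active"
                )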
"""
pass
def _create_parent_columns(self):
tools.create_column(self._cr, self._table, 'parent_path', 'VARCHAR')
if 'parent_path' not in self._fields:
_logger.error("add a field parent_path on model %s: parent_path = fields.Char(index=True)", self._name)
elif not self._fields['parent_path'].index:
            _logger.error('parent_path field on model %s must be indexed! Add index=True to the field definition.', self._name)
def _add_sql_constraints(self):
"""
Modify this model's database table constraints so they match the one in
_sql_constraints.
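
        The constraints themselves are declared on the model as
        ``(name, sql_definition, error_message)`` tuples, e.g. (illustrative)::

            _sql_constraints = [
                ('name_uniq', 'unique(name)', "The name must be unique!"),
            ]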
"""
cr = self._cr
foreign_key_re = re.compile(r'\s*foreign\s+key\b.*', re.I)
for (key, definition, message) in self._sql_constraints:
conname = '%s_%s' % (self._table, key)
current_definition = tools.constraint_definition(cr, self._table, conname)
if current_definition == definition:
continue
if current_definition:
# constraint exists but its definition may have changed
tools.drop_constraint(cr, self._table, conname)
if foreign_key_re.match(definition):
self.pool.post_init(tools.add_constraint, cr, self._table, conname, definition)
else:
self.pool.post_constraint(tools.add_constraint, cr, self._table, conname, definition)
def _execute_sql(self):
""" Execute the SQL code from the _sql attribute (if any)."""
if hasattr(self, "_sql"):
self._cr.execute(self._sql)
#
    # Update objects that use this one to update their _inherits fields
#
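    # Illustrative sketch (not part of this file): a model using delegation
    # inheritance typically declares something like
    #
    #     _name = 'res.users'
    #     _inherits = {'res.partner': 'partner_id'}
    #     partner_id = fields.Many2one('res.partner', required=True, ondelete='cascade')
    #
    # so that the partner's fields become accessible on the user record through
    # the related fields set up by _add_inherited_fields() below.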
@api.model
def _add_inherited_fields(self):
""" Determine inherited fields. """
if self._abstract or not self._inherits:
return
# determine which fields can be inherited
to_inherit = {
name: (parent_fname, field)
for parent_model_name, parent_fname in self._inherits.items()
for name, field in self.env[parent_model_name]._fields.items()
}
# add inherited fields that are not redefined locally
for name, (parent_fname, field) in to_inherit.items():
if name not in self._fields:
# inherited fields are implemented as related fields, with the
# following specific properties:
# - reading inherited fields should not bypass access rights
# - copy inherited fields iff their original field is copied
Field = type(field)
self._add_field(name, Field(
inherited=True,
inherited_field=field,
related=f"{parent_fname}.{name}",
related_sudo=False,
copy=field.copy,
readonly=field.readonly,
))
@api.model
def _inherits_check(self):
for table, field_name in self._inherits.items():
field = self._fields.get(field_name)
if not field:
_logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
from .fields import Many2one
field = Many2one(table, string="Automatically created field to link to parent %s" % table, required=True, ondelete="cascade")
self._add_field(field_name, field)
elif not (field.required and (field.ondelete or "").lower() in ("cascade", "restrict")):
_logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, self._name)
field.required = True
field.ondelete = "cascade"
field.delegate = True
# reflect fields with delegate=True in dictionary self._inherits
for field in self._fields.values():
if field.type == 'many2one' and not field.related and field.delegate:
if not field.required:
_logger.warning("Field %s with delegate=True must be required.", field)
field.required = True
if field.ondelete.lower() not in ('cascade', 'restrict'):
field.ondelete = 'cascade'
type(self)._inherits = {**self._inherits, field.comodel_name: field.name}
self.pool[field.comodel_name]._inherits_children.add(self._name)
@api.model
def _prepare_setup(self):
""" Prepare the setup of the model. """
cls = type(self)
cls._setup_done = False
# changing base classes is costly, do it only when necessary
if cls.__bases__ != cls.__base_classes:
cls.__bases__ = cls.__base_classes
# reset those attributes on the model's class for _setup_fields() below
for attr in ('_rec_name', '_active_name'):
discardattr(cls, attr)
@api.model
def _setup_base(self):
""" Determine the inherited and custom fields of the model. """
cls = type(self)
if cls._setup_done:
return
# 1. determine the proper fields of the model: the fields defined on the
# class and magic fields, not the inherited or custom ones
# retrieve fields from parent classes, and duplicate them on cls to
# avoid clashes with inheritance between different models
for name in cls._fields:
discardattr(cls, name)
cls._fields.clear()
# collect the definitions of each field (base definition + overrides)
definitions = defaultdict(list)
for klass in reversed(cls.mro()):
if is_definition_class(klass):
for field in klass._field_definitions:
definitions[field.name].append(field)
for name, fields_ in definitions.items():
if len(fields_) == 1 and fields_[0]._direct and fields_[0].model_name == cls._name:
cls._fields[name] = fields_[0]
else:
Field = type(fields_[-1])
self._add_field(name, Field(_base_fields=fields_))
# 2. add manual fields
if self.pool._init_modules:
self.env['ir.model.fields']._add_manual_fields(self)
# 3. make sure that parent models determine their own fields, then add
# inherited fields to cls
self._inherits_check()
for parent in self._inherits:
self.env[parent]._setup_base()
self._add_inherited_fields()
# 4. initialize more field metadata
cls._setup_done = True
for field in cls._fields.values():
field.prepare_setup()
# 5. determine and validate rec_name
if cls._rec_name:
assert cls._rec_name in cls._fields, \
"Invalid _rec_name=%r for model %r" % (cls._rec_name, cls._name)
elif 'name' in cls._fields:
cls._rec_name = 'name'
elif cls._custom and 'x_name' in cls._fields:
cls._rec_name = 'x_name'
# 6. determine and validate active_name
if cls._active_name:
assert (cls._active_name in cls._fields
and cls._active_name in ('active', 'x_active')), \
("Invalid _active_name=%r for model %r; only 'active' and "
"'x_active' are supported and the field must be present on "
"the model") % (cls._active_name, cls._name)
elif 'active' in cls._fields:
cls._active_name = 'active'
elif 'x_active' in cls._fields:
cls._active_name = 'x_active'
@api.model
def _setup_fields(self):
""" Setup the fields, except for recomputation triggers. """
cls = type(self)
# set up fields
bad_fields = []
for name, field in cls._fields.items():
try:
field.setup(self)
except Exception:
if field.base_field.manual:
                    # Something went wrong while setting up a manual field.
                    # This can happen with related fields using another manual many2one field
                    # that hasn't been loaded because the comodel does not exist yet.
                    # This can also be a manual function field depending on fields not loaded yet.
bad_fields.append(name)
continue
raise
for name in bad_fields:
self._pop_field(name)
@api.model
def _setup_complete(self):
""" Setup recomputation triggers, and complete the model setup. """
cls = type(self)
# register constraints and onchange methods
cls._init_constraints_onchanges()
@api.model
def fields_get(self, allfields=None, attributes=None):
""" fields_get([fields][, attributes])
Return the definition of each field.
The returned value is a dictionary (indexed by field name) of
dictionaries. The _inherits'd fields are included. The string, help,
and selection (if present) attributes are translated.
:param allfields: list of fields to document, all if empty or not provided
:param attributes: list of description attributes to return for each field, all if empty or not provided
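
        An illustrative call restricted to a couple of attributes::

            self.env['res.partner'].fields_get(['name'], ['type', 'string'])
            # -> {'name': {'type': 'char', 'string': 'Name'}}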
"""
has_access = functools.partial(self.check_access_rights, raise_exception=False)
readonly = not (has_access('write') or has_access('create'))
res = {}
for fname, field in self._fields.items():
if allfields and fname not in allfields:
continue
if field.groups and not self.env.su and not self.user_has_groups(field.groups):
continue
description = field.get_description(self.env)
description['name'] = fname
if readonly:
description['readonly'] = True
description['states'] = {}
if attributes:
description = {key: val
for key, val in description.items()
if key in attributes}
res[fname] = description
return res
@api.model
def get_empty_list_help(self, help):
""" Generic method giving the help message displayed when having
no result to display in a list or kanban view. By default it returns
the help given in parameter that is generally the help message
defined in the action.
"""
return help
@api.model
def check_field_access_rights(self, operation, fields):
"""
Check the user access rights on the given fields. This raises Access
        Denied if the user does not have the rights. Otherwise it returns the
        fields (as is if ``fields`` is not falsy, or the readable/writable
        fields if ``fields`` is falsy).
"""
if self.env.su:
return fields or list(self._fields)
def valid(fname):
""" determine whether user has access to field ``fname`` """
field = self._fields.get(fname)
if field and field.groups:
return self.user_has_groups(field.groups)
else:
return True
if not fields:
fields = [name for name in self._fields if valid(name)]
else:
invalid_fields = {name for name in fields if not valid(name)}
if invalid_fields:
_logger.info('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
operation, self._uid, self._name, ', '.join(invalid_fields))
description = self.env['ir.model']._get(self._name).name
if not self.env.user.has_group('base.group_no_one'):
raise AccessError(
_('You do not have enough rights to access the fields "%(fields)s" on %(document_kind)s (%(document_model)s). '\
'Please contact your system administrator.\n\n(Operation: %(operation)s)') % {
'fields': ','.join(list(invalid_fields)),
'document_kind': description,
'document_model': self._name,
'operation': operation,
})
def format_groups(field):
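                    # ``field.groups`` is a comma-separated list of group xml ids,
                    # optionally prefixed with '!' to exclude them; an illustrative
                    # (hypothetical) value such as
                    #     groups='base.group_system,!base.group_portal'
                    # reads as "allowed for system administrators, forbidden for
                    # portal users" in the message built below.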
if field.groups == '.':
return _("always forbidden")
anyof = self.env['res.groups']
noneof = self.env['res.groups']
for g in field.groups.split(','):
if g.startswith('!'):
noneof |= self.env.ref(g[1:])
else:
anyof |= self.env.ref(g)
strs = []
if anyof:
strs.append(_("allowed for groups %s") % ', '.join(
anyof.sorted(lambda g: g.id)
.mapped(lambda g: repr(g.display_name))
))
if noneof:
strs.append(_("forbidden for groups %s") % ', '.join(
noneof.sorted(lambda g: g.id)
.mapped(lambda g: repr(g.display_name))
))
return '; '.join(strs)
raise AccessError(_("""The requested operation can not be completed due to security restrictions.
Document type: %(document_kind)s (%(document_model)s)
Operation: %(operation)s
User: %(user)s
Fields:
%(fields_list)s""") % {
'document_model': self._name,
'document_kind': description or self._name,
'operation': operation,
'user': self._uid,
'fields_list': '\n'.join(
'- %s (%s)' % (f, format_groups(self._fields[f]))
for f in sorted(invalid_fields)
)
})
return fields
def read(self, fields=None, load='_classic_read'):
""" read([fields])
Reads the requested fields for the records in ``self``, low-level/RPC
method. In Python code, prefer :meth:`~.browse`.
:param fields: list of field names to return (default is all fields)
:return: a list of dictionaries mapping field names to their values,
with one dictionary per record
:raise AccessError: if user has no read rights on some of the given
records
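
        An illustrative call (the record id and returned values are made up)::

            partner = self.env['res.partner'].browse(7)
            partner.read(['name', 'country_id'])
            # -> [{'id': 7, 'name': 'Azure Interior',
            #      'country_id': (233, 'United States')}]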
"""
fields = self.check_field_access_rights('read', fields)
# fetch stored fields from the database to the cache
stored_fields = set()
for name in fields:
field = self._fields.get(name)
if not field:
raise ValueError("Invalid field %r on model %r" % (name, self._name))
if field.store:
stored_fields.add(name)
elif field.compute:
# optimization: prefetch direct field dependencies
for dotname in self.pool.field_depends[field]:
f = self._fields[dotname.split('.')[0]]
if f.prefetch and (not f.groups or self.user_has_groups(f.groups)):
stored_fields.add(f.name)
self._read(stored_fields)
return self._read_format(fnames=fields, load=load)
def _read_format(self, fnames, load='_classic_read'):
"""Returns a list of dictionaries mapping field names to their values,
with one dictionary per record that exists.
The output format is similar to the one expected from the `read` method.
The current method is different from `read` because it retrieves its
values from the cache without doing a query when it is avoidable.
"""
data = [(record, {'id': record._ids[0]}) for record in self]
use_name_get = (load == '_classic_read')
for name in fnames:
convert = self._fields[name].convert_to_read
for record, vals in data:
# missing records have their vals empty
if not vals:
continue
try:
vals[name] = convert(record[name], record, use_name_get)
except MissingError:
vals.clear()
result = [vals for record, vals in data if vals]
return result
def _fetch_field(self, field):
""" Read from the database in order to fetch ``field`` (:class:`Field`
instance) for ``self`` in cache.
"""
self.check_field_access_rights('read', [field.name])
# determine which fields can be prefetched
if self._context.get('prefetch_fields', True) and field.prefetch:
fnames = [
name
for name, f in self._fields.items()
# select fields that can be prefetched
if f.prefetch
# discard fields with groups that the user may not access
if not (f.groups and not self.user_has_groups(f.groups))
# discard fields that must be recomputed
if not (f.compute and self.env.records_to_compute(f))
]
if field.name not in fnames:
fnames.append(field.name)
self = self - self.env.records_to_compute(field)
else:
fnames = [field.name]
self._read(fnames)
def _read(self, fields):
""" Read the given fields of the records in ``self`` from the database,
and store them in cache. Access errors are also stored in cache.
Skip fields that are not stored.
        :param fields: list of field names of model ``self``; the stored
            fields among them (including fields inherited through _inherits
            parents) are fetched from the database into the cache
"""
if not self:
return
self.check_access_rights('read')
# if a read() follows a write(), we must flush updates, as read() will
        # fetch from database and overwrite the cache (`test_update_with_id`)
self.flush(fields, self)
field_names = []
inherited_field_names = []
for name in fields:
field = self._fields.get(name)
if field:
if field.store:
field_names.append(name)
elif field.base_field.store:
inherited_field_names.append(name)
else:
_logger.warning("%s.read() with unknown field '%s'", self._name, name)
# determine the fields that are stored as columns in tables; ignore 'id'
fields_pre = [
field
for field in (self._fields[name] for name in field_names + inherited_field_names)
if field.name != 'id'
if field.base_field.store and field.base_field.column_type
if not (field.inherited and callable(field.base_field.translate))
]
if fields_pre:
env = self.env
cr, user, context, su = env.args
# make a query object for selecting ids, and apply security rules to it
query = Query(self.env.cr, self._table, self._table_query)
self._apply_ir_rules(query, 'read')
# the query may involve several tables: we need fully-qualified names
def qualify(field):
col = field.name
res = self._inherits_join_calc(self._table, field.name, query)
if field.type == 'binary' and (context.get('bin_size') or context.get('bin_size_' + col)):
# PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
res = 'pg_size_pretty(length(%s)::bigint)' % res
return '%s as "%s"' % (res, col)
# selected fields are: 'id' followed by fields_pre
qual_names = [qualify(name) for name in [self._fields['id']] + fields_pre]
# determine the actual query to execute (last parameter is added below)
query.add_where('"%s".id IN %%s' % self._table)
query_str, params = query.select(*qual_names)
result = []
for sub_ids in cr.split_for_in_conditions(self.ids):
cr.execute(query_str, params + [sub_ids])
result += cr.fetchall()
else:
try:
self.check_access_rule('read')
except MissingError:
# Method _read() should never raise a MissingError, but method
# check_access_rule() can, because it must read fields on self.
# So we restrict 'self' to existing records (to avoid an extra
# exists() at the end of the method).
self = self.exists()
self.check_access_rule('read')
result = [(id_,) for id_ in self.ids]
fetched = self.browse()
if result:
cols = zip(*result)
ids = next(cols)
fetched = self.browse(ids)
for field in fields_pre:
values = next(cols)
if context.get('lang') and not field.inherited and callable(field.translate):
values = list(values)
if any(values):
translate = field.get_trans_func(fetched)
for index in range(len(ids)):
values[index] = translate(ids[index], values[index])
# store values in cache
self.env.cache.update(fetched, field, values)
# determine the fields that must be processed now;
# for the sake of simplicity, we ignore inherited fields
for name in field_names:
field = self._fields[name]
if not field.column_type:
field.read(fetched)
if field.deprecated:
_logger.warning('Field %s is deprecated: %s', field, field.deprecated)
# possibly raise exception for the records that could not be read
missing = self - fetched
if missing:
extras = fetched - self
if extras:
raise AccessError(
_("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
missing._ids, extras._ids,
))
# mark non-existing records in missing
forbidden = missing.exists()
if forbidden:
raise self.env['ir.rule']._make_access_error('read', forbidden)
def get_metadata(self):
"""Return some metadata about the given records.
:return: list of ownership dictionaries for each requested record
:rtype: list of dictionaries with the following keys:
* id: object id
* create_uid: user who created the record
* create_date: date when the record was created
* write_uid: last user who changed the record
* write_date: date of the last change to the record
* xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
* noupdate: A boolean telling if the record will be updated or not
"""
IrModelData = self.env['ir.model.data'].sudo()
if self._log_access:
res = self.read(LOG_ACCESS_COLUMNS)
else:
res = [{'id': x} for x in self.ids]
xml_data = dict((x['res_id'], x) for x in IrModelData.search_read([('model', '=', self._name),
('res_id', 'in', self.ids)],
['res_id', 'noupdate', 'module', 'name'],
order='id DESC'))
for r in res:
value = xml_data.get(r['id'], {})
r['xmlid'] = '%(module)s.%(name)s' % value if value else False
r['noupdate'] = value.get('noupdate', False)
return res
def get_base_url(self):
"""
        Returns the root url for a given record.
        By default, it returns the ``web.base.url`` ir.config_parameter,
        but it can be overridden per model.
:return: the base url for this record
:rtype: string
"""
if len(self) > 1:
raise ValueError("Expected singleton or no record: %s" % self)
return self.env['ir.config_parameter'].sudo().get_param('web.base.url')
def _check_concurrency(self):
if not (self._log_access and self._context.get(self.CONCURRENCY_CHECK_FIELD)):
return
check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
for sub_ids in self._cr.split_for_in_conditions(self.ids):
nclauses = 0
params = []
for id in sub_ids:
id_ref = "%s,%s" % (self._name, id)
update_date = self._context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
if update_date:
nclauses += 1
params.extend([id, update_date])
if not nclauses:
continue
query = "SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause] * nclauses))
self._cr.execute(query, tuple(params))
res = self._cr.fetchone()
if res:
# mention the first one only to keep the error message readable
raise ValidationError(_('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
def _check_company(self, fnames=None):
""" Check the companies of the values of the given field names.
:param list fnames: names of relational fields to check
:raises UserError: if the `company_id` of the value of any field is not
in `[False, self.company_id]` (or `self` if
:class:`~odoo.addons.base.models.res_company`).
For :class:`~odoo.addons.base.models.res_users` relational fields,
verifies record company is in `company_ids` fields.
User with main company A, having access to company A and B, could be
assigned or linked to records in company B.
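
        An illustrative declaration of a field covered by this check, on a
        model with ``_check_company_auto = True`` (names are hypothetical)::

            company_id = fields.Many2one('res.company', required=True)
            partner_id = fields.Many2one('res.partner', check_company=True)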
"""
if fnames is None:
fnames = self._fields
regular_fields = []
property_fields = []
for name in fnames:
field = self._fields[name]
if field.relational and field.check_company and \
'company_id' in self.env[field.comodel_name]:
if not field.company_dependent:
regular_fields.append(name)
else:
property_fields.append(name)
if not (regular_fields or property_fields):
return
inconsistencies = []
for record in self:
company = record.company_id if record._name != 'res.company' else record
# The first part of the check verifies that all records linked via relation fields are compatible
# with the company of the origin document, i.e. `self.account_id.company_id == self.company_id`
for name in regular_fields:
corecord = record.sudo()[name]
                # Special case with `res.users` since a user can belong to multiple companies.
if corecord._name == 'res.users' and corecord.company_ids:
if not (company <= corecord.company_ids):
inconsistencies.append((record, name, corecord))
elif not (corecord.company_id <= company):
inconsistencies.append((record, name, corecord))
# The second part of the check (for property / company-dependent fields) verifies that the records
# linked via those relation fields are compatible with the company that owns the property value, i.e.
# the company for which the value is being assigned, i.e:
            # `self.property_account_payable_id.company_id == self.env.company`
company = self.env.company
for name in property_fields:
                # Special case with `res.users` since a user can belong to multiple companies.
corecord = record.sudo()[name]
if corecord._name == 'res.users' and corecord.company_ids:
if not (company <= corecord.company_ids):
inconsistencies.append((record, name, corecord))
elif not (corecord.company_id <= company):
inconsistencies.append((record, name, corecord))
if inconsistencies:
lines = [_("Incompatible companies on records:")]
company_msg = _("- Record is company %(company)r and %(field)r (%(fname)s: %(values)s) belongs to another company.")
record_msg = _("- %(record)r belongs to company %(company)r and %(field)r (%(fname)s: %(values)s) belongs to another company.")
for record, name, corecords in inconsistencies[:5]:
if record._name == 'res.company':
msg, company = company_msg, record
else:
msg, company = record_msg, record.company_id
field = self.env['ir.model.fields']._get(self._name, name)
lines.append(msg % {
'record': record.display_name,
'company': company.display_name,
'field': field.field_description,
'fname': field.name,
'values': ", ".join(repr(rec.display_name) for rec in corecords),
})
raise UserError("\n".join(lines))
@api.model
def check_access_rights(self, operation, raise_exception=True):
""" Verifies that the operation given by ``operation`` is allowed for
the current user according to the access rights.
"""
return self.env['ir.model.access'].check(self._name, operation, raise_exception)
def check_access_rule(self, operation):
""" Verifies that the operation given by ``operation`` is allowed for
the current user according to ir.rules.
:param operation: one of ``write``, ``unlink``
:raise UserError: * if current ir.rules do not permit this operation.
:return: None if the operation is allowed
"""
if self.env.su:
return
# SQL Alternative if computing in-memory is too slow for large dataset
# invalid = self - self._filter_access_rules(operation)
invalid = self - self._filter_access_rules_python(operation)
if not invalid:
return
forbidden = invalid.exists()
if forbidden:
# the invalid records are (partially) hidden by access rules
raise self.env['ir.rule']._make_access_error(operation, forbidden)
# If we get here, the invalid records are not in the database.
if operation in ('read', 'unlink'):
# No need to warn about deleting an already deleted record.
# And no error when reading a record that was deleted, to prevent spurious
# errors for non-transactional search/read sequences coming from clients.
return
_logger.info('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, self._uid, self._name)
raise MissingError(
_('One of the documents you are trying to access has been deleted, please try again after refreshing.')
+ '\n\n({} {}, {} {}, {} {}, {} {})'.format(
_('Document type:'), self._name, _('Operation:'), operation,
_('Records:'), invalid.ids[:6], _('User:'), self._uid,
)
)
def _filter_access_rules(self, operation):
""" Return the subset of ``self`` for which ``operation`` is allowed. """
if self.env.su:
return self
if not self._ids:
return self
query = Query(self.env.cr, self._table, self._table_query)
self._apply_ir_rules(query, operation)
if not query.where_clause:
return self
# determine ids in database that satisfy ir.rules
valid_ids = set()
query.add_where(f'"{self._table}".id IN %s')
query_str, params = query.select()
self._flush_search([])
for sub_ids in self._cr.split_for_in_conditions(self.ids):
self._cr.execute(query_str, params + [sub_ids])
valid_ids.update(row[0] for row in self._cr.fetchall())
# return new ids without origin and ids with origin in valid_ids
return self.browse([
it
for it in self._ids
if not (it or it.origin) or (it or it.origin) in valid_ids
])
def _filter_access_rules_python(self, operation):
dom = self.env['ir.rule']._compute_domain(self._name, operation)
return self.sudo().filtered_domain(dom or [])
def unlink(self):
""" unlink()
Deletes the records of the current set
:raise AccessError: * if user has no unlink rights on the requested object
* if user tries to bypass access rules for unlink on the requested object
:raise UserError: if the record is default property for other records
"""
if not self:
return True
self.check_access_rights('unlink')
self.check_access_rule('unlink')
self._check_concurrency()
from odoo.addons.base.models.ir_model import MODULE_UNINSTALL_FLAG
for func in self._ondelete_methods:
# func._ondelete is True if it should be called during uninstallation
if func._ondelete or not self._context.get(MODULE_UNINSTALL_FLAG):
func(self)
# mark fields that depend on 'self' to recompute them after 'self' has
# been deleted (like updating a sum of lines after deleting one line)
self.flush()
self.modified(self._fields, before=True)
with self.env.norecompute():
cr = self._cr
Data = self.env['ir.model.data'].sudo().with_context({})
Defaults = self.env['ir.default'].sudo()
Property = self.env['ir.property'].sudo()
Attachment = self.env['ir.attachment'].sudo()
ir_model_data_unlink = Data
ir_attachment_unlink = Attachment
# TOFIX: this avoids an infinite loop when trying to recompute a
# field, which triggers the recomputation of another field using the
# same compute function, which then triggers again the computation
# of those two fields
for field in self._fields.values():
self.env.remove_to_compute(field, self)
for sub_ids in cr.split_for_in_conditions(self.ids):
# Check if the records are used as default properties.
refs = ['%s,%s' % (self._name, i) for i in sub_ids]
if Property.search([('res_id', '=', False), ('value_reference', 'in', refs)], limit=1):
raise UserError(_('Unable to delete this document because it is used as a default property'))
# Delete the records' properties.
Property.search([('res_id', 'in', refs)]).unlink()
query = "DELETE FROM %s WHERE id IN %%s" % self._table
cr.execute(query, (sub_ids,))
# Removing the ir_model_data reference if the record being deleted
# is a record created by xml/csv file, as these are not connected
# with real database foreign keys, and would be dangling references.
#
# Note: the following steps are performed as superuser to avoid
# access rights restrictions, and with no context to avoid possible
# side-effects during admin calls.
data = Data.search([('model', '=', self._name), ('res_id', 'in', sub_ids)])
if data:
ir_model_data_unlink |= data
# For the same reason, remove the defaults having some of the
# records as value
Defaults.discard_records(self.browse(sub_ids))
# For the same reason, remove the relevant records in ir_attachment
# (the search is performed with sql as the search method of
# ir_attachment is overridden to hide attachments of deleted
# records)
query = 'SELECT id FROM ir_attachment WHERE res_model=%s AND res_id IN %s'
cr.execute(query, (self._name, sub_ids))
attachments = Attachment.browse([row[0] for row in cr.fetchall()])
if attachments:
ir_attachment_unlink |= attachments.sudo()
# invalidate the *whole* cache, since the orm does not handle all
# changes made in the database, like cascading delete!
self.invalidate_cache()
if ir_model_data_unlink:
ir_model_data_unlink.unlink()
if ir_attachment_unlink:
ir_attachment_unlink.unlink()
        # DLE P93: flush after the unlink, for recomputed fields depending on
        # the modified() triggered by the unlink
self.flush()
# auditing: deletions are infrequent and leave no trace in the database
_unlink.info('User #%s deleted %s records with IDs: %r', self._uid, self._name, self.ids)
return True
def write(self, vals):
""" write(vals)
Updates all records in the current set with the provided values.
:param dict vals: fields to update and the value to set on them e.g::
{'foo': 1, 'bar': "Qux"}
will set the field ``foo`` to ``1`` and the field ``bar`` to
``"Qux"`` if those are valid (otherwise it will trigger an error).
:raise AccessError: * if user has no write rights on the requested object
* if user tries to bypass access rules for write on the requested object
:raise ValidationError: if user tries to enter invalid value for a field that is not in selection
        :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
* For numeric fields (:class:`~odoo.fields.Integer`,
:class:`~odoo.fields.Float`) the value should be of the
corresponding type
* For :class:`~odoo.fields.Boolean`, the value should be a
:class:`python:bool`
* For :class:`~odoo.fields.Selection`, the value should match the
selection values (generally :class:`python:str`, sometimes
:class:`python:int`)
* For :class:`~odoo.fields.Many2one`, the value should be the
database identifier of the record to set
* Other non-relational fields use a string for value
.. danger::
for historical and compatibility reasons,
:class:`~odoo.fields.Date` and
:class:`~odoo.fields.Datetime` fields use strings as values
(written and read) rather than :class:`~python:datetime.date` or
:class:`~python:datetime.datetime`. These date strings are
UTC-only and formatted according to
:const:`odoo.tools.misc.DEFAULT_SERVER_DATE_FORMAT` and
:const:`odoo.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT`
* .. _openerp/models/relationals/format:
The expected value of a :class:`~odoo.fields.One2many` or
:class:`~odoo.fields.Many2many` relational field is a list of
          :class:`~odoo.fields.Command` that manipulate the relation the
          field implements. There are a total of 7 commands:
:meth:`~odoo.fields.Command.create`,
:meth:`~odoo.fields.Command.update`,
:meth:`~odoo.fields.Command.delete`,
:meth:`~odoo.fields.Command.unlink`,
:meth:`~odoo.fields.Command.link`,
:meth:`~odoo.fields.Command.clear`, and
:meth:`~odoo.fields.Command.set`.
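
          An illustrative write mixing a scalar value with x2many commands
          (``line_ids`` and its values are hypothetical)::

              from odoo.fields import Command

              records.write({
                  'name': "New name",
                  'line_ids': [
                      Command.create({'qty': 1}),
                      Command.link(existing_line.id),
                  ],
              })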
"""
if not self:
return True
self.check_access_rights('write')
self.check_field_access_rights('write', vals.keys())
self.check_access_rule('write')
env = self.env
bad_names = {'id', 'parent_path'}
if self._log_access:
# the superuser can set log_access fields while loading registry
if not(self.env.uid == SUPERUSER_ID and not self.pool.ready):
bad_names.update(LOG_ACCESS_COLUMNS)
# set magic fields
vals = {key: val for key, val in vals.items() if key not in bad_names}
if self._log_access:
vals.setdefault('write_uid', self.env.uid)
vals.setdefault('write_date', self.env.cr.now())
field_values = [] # [(field, value)]
determine_inverses = defaultdict(list) # {inverse: fields}
records_to_inverse = {} # {field: records}
relational_names = []
protected = set()
check_company = False
for fname, value in vals.items():
field = self._fields.get(fname)
if not field:
raise ValueError("Invalid field %r on model %r" % (fname, self._name))
field_values.append((field, value))
if field.inverse:
if field.type in ('one2many', 'many2many'):
                    # The written value is a list of commands that must be applied
# on the field's current value. Because the field is
# protected while being written, the field's current value
# will not be computed and default to an empty recordset. So
# make sure the field's value is in cache before writing, in
# order to avoid an inconsistent update.
self[fname]
determine_inverses[field.inverse].append(field)
# DLE P150: `test_cancel_propagation`, `test_manufacturing_3_steps`, `test_manufacturing_flow`
# TODO: check whether still necessary
records_to_inverse[field] = self.filtered('id')
if field.relational or self.pool.field_inverses[field]:
relational_names.append(fname)
if field.inverse or (field.compute and not field.readonly):
if field.store or field.type not in ('one2many', 'many2many'):
# Protect the field from being recomputed while being
# inversed. In the case of non-stored x2many fields, the
                    # field's value may contain unexpected new records (created
# by command 0). Those new records are necessary for
# inversing the field, but should no longer appear if the
# field is recomputed afterwards. Not protecting the field
# will automatically invalidate the field from the cache,
# forcing its value to be recomputed once dependencies are
# up-to-date.
protected.update(self.pool.field_computed.get(field, [field]))
if fname == 'company_id' or (field.relational and field.check_company):
check_company = True
# force the computation of fields that are computed with some assigned
# fields, but are not assigned themselves
to_compute = [field.name
for field in protected
if field.compute and field.name not in vals]
if to_compute:
self.recompute(to_compute, self)
# protect fields being written against recomputation
with env.protecting(protected, self):
# Determine records depending on values. When modifying a relational
# field, you have to recompute what depends on the field's values
# before and after modification. This is because the modification
# has an impact on the "data path" between a computed field and its
# dependency. Note that this double call to modified() is only
# necessary for relational fields.
#
# It is best explained with a simple example: consider two sales
# orders SO1 and SO2. The computed total amount on sales orders
# indirectly depends on the many2one field 'order_id' linking lines
# to their sales order. Now consider the following code:
#
# line = so1.line_ids[0] # pick a line from SO1
# line.order_id = so2 # move the line to SO2
#
# In this situation, the total amount must be recomputed on *both*
# sales order: the line's order before the modification, and the
# line's order after the modification.
self.modified(relational_names, before=True)
real_recs = self.filtered('id')
# field.write_sequence determines a priority for writing on fields.
# Monetary fields need their corresponding currency field in cache
# for rounding values. X2many fields must be written last, because
# they flush other fields when deleting lines.
for field, value in sorted(field_values, key=lambda item: item[0].write_sequence):
field.write(self, value)
# determine records depending on new values
#
# Call modified after write, because modified may trigger a
# search, which may trigger a flush, which may trigger a recompute
# that removes the field from the recompute list while all the
# values required for the computation may not yet be in cache.
# e.g. writing `name` on `res.partner` triggers the recompute of
# `display_name`, which triggers a search on child_ids to find the
# children whose display_name must be recomputed, which
# triggers the flush of `display_name` because the _order of
# res.partner includes display_name. The computation of display_name
# is then done too soon because parent_id has not been written yet.
# (`test_01_website_reset_password_tour`)
self.modified(vals)
if self._parent_store and self._parent_name in vals:
self.flush([self._parent_name])
# validate non-inversed fields first
inverse_fields = [f.name for fs in determine_inverses.values() for f in fs]
real_recs._validate_fields(vals, inverse_fields)
for fields in determine_inverses.values():
# write again on non-stored fields that have been invalidated from cache
for field in fields:
if not field.store and any(self.env.cache.get_missing_ids(real_recs, field)):
field.write(real_recs, vals[field.name])
# inverse records that are not being computed
try:
fields[0].determine_inverse(real_recs)
except AccessError as e:
if fields[0].inherited:
description = self.env['ir.model']._get(self._name).name
raise AccessError(
_("%(previous_message)s\n\nImplicitly accessed through '%(document_kind)s' (%(document_model)s).") % {
'previous_message': e.args[0],
'document_kind': description,
'document_model': self._name,
}
)
raise
# validate inversed fields
real_recs._validate_fields(inverse_fields)
if check_company and self._check_company_auto:
self._check_company()
return True
def _write(self, vals):
# low-level implementation of write()
if not self:
return True
self._check_concurrency()
cr = self._cr
# determine records that require updating parent_path
parent_records = self._parent_store_update_prepare(vals)
if self._log_access:
# set magic fields (already done by write(), but not for computed fields)
vals = dict(vals)
vals.setdefault('write_uid', self.env.uid)
vals.setdefault('write_date', self.env.cr.now())
# determine SQL values
columns = [] # list of (column_name, format, value)
for name, val in sorted(vals.items()):
if self._log_access and name in LOG_ACCESS_COLUMNS and not val:
continue
field = self._fields[name]
assert field.store
if field.deprecated:
_logger.warning('Field %s is deprecated: %s', field, field.deprecated)
assert field.column_type
columns.append((name, field.column_format, val))
# update columns
if columns:
query = 'UPDATE "%s" SET %s WHERE id IN %%s' % (
self._table, ','.join('"%s"=%s' % (column[0], column[1]) for column in columns),
)
params = [column[2] for column in columns]
for sub_ids in cr.split_for_in_conditions(set(self.ids)):
cr.execute(query, params + [sub_ids])
if cr.rowcount != len(sub_ids):
raise MissingError(
_('One of the records you are trying to modify has already been deleted (Document type: %s).', self._description)
+ '\n\n({} {}, {} {})'.format(_('Records:'), sub_ids[:6], _('User:'), self._uid)
)
# update parent_path
if parent_records:
parent_records._parent_store_update()
return True
@api.model_create_multi
@api.returns('self', lambda value: value.id)
def create(self, vals_list):
""" create(vals_list) -> records
Creates new records for the model.
The new records are initialized using the values from the list of dicts
``vals_list``, and if necessary those from :meth:`~.default_get`.
:param Union[list[dict], dict] vals_list:
values for the model's fields, as a list of dictionaries::
[{'field_name': field_value, ...}, ...]
For backward compatibility, ``vals_list`` may be a dictionary.
It is treated as a singleton list ``[vals]``, and a single record
is returned.
see :meth:`~.write` for details
:return: the created records
:raise AccessError: * if user has no create rights on the requested object
* if user tries to bypass access rules for create on the requested object
:raise ValidationError: if user tries to enter invalid value for a field that is not in selection
:raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
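For example, an illustrative call (model and values are examples only)::

    partners = self.env['res.partner'].create([
        {'name': "Alice"},
        {'name': "Bob", 'parent_id': alice_company.id},
    ])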
"""
if not vals_list:
return self.browse()
self = self.browse()
self.check_access_rights('create')
bad_names = {'id', 'parent_path'}
if self._log_access:
# the superuser can set log_access fields while loading registry
if not(self.env.uid == SUPERUSER_ID and not self.pool.ready):
bad_names.update(LOG_ACCESS_COLUMNS)
# classify fields for each record
data_list = []
inversed_fields = set()
for vals in vals_list:
# add missing defaults
vals = self._add_missing_default_values(vals)
# set magic fields
for name in bad_names:
vals.pop(name, None)
if self._log_access:
vals.setdefault('create_uid', self.env.uid)
vals.setdefault('create_date', self.env.cr.now())
vals.setdefault('write_uid', self.env.uid)
vals.setdefault('write_date', self.env.cr.now())
# distribute fields into sets for various purposes
data = {}
data['stored'] = stored = {}
data['inversed'] = inversed = {}
data['inherited'] = inherited = defaultdict(dict)
data['protected'] = protected = set()
for key, val in vals.items():
field = self._fields.get(key)
if not field:
raise ValueError("Invalid field %r on model %r" % (key, self._name))
if field.company_dependent:
irprop_def = self.env['ir.property']._get(key, self._name)
cached_def = field.convert_to_cache(irprop_def, self)
cached_val = field.convert_to_cache(val, self)
if cached_val == cached_def:
# val is the same as the default value defined in
# 'ir.property'; by design, 'ir.property' will not
# create entries specific to these records; skipping the
# field inverse saves 4 SQL queries
continue
if field.store:
stored[key] = val
if field.inherited:
inherited[field.related_field.model_name][key] = val
elif field.inverse:
inversed[key] = val
inversed_fields.add(field)
# protect non-readonly computed fields against (re)computation
if field.compute and not field.readonly:
protected.update(self.pool.field_computed.get(field, [field]))
data_list.append(data)
# create or update parent records
for model_name, parent_name in self._inherits.items():
parent_data_list = []
for data in data_list:
if not data['stored'].get(parent_name):
parent_data_list.append(data)
elif data['inherited'][model_name]:
parent = self.env[model_name].browse(data['stored'][parent_name])
parent.write(data['inherited'][model_name])
if parent_data_list:
parents = self.env[model_name].create([
data['inherited'][model_name]
for data in parent_data_list
])
for parent, data in zip(parents, parent_data_list):
data['stored'][parent_name] = parent.id
# create records with stored fields
records = self._create(data_list)
# protect fields being written against recomputation
protected = [(data['protected'], data['record']) for data in data_list]
with self.env.protecting(protected):
# group fields by inverse method (to call it once), and order groups
# by dependence (in case they depend on each other)
field_groups = (fields for _inv, fields in groupby(inversed_fields, attrgetter('inverse')))
for fields in field_groups:
# determine which records to inverse for those fields
inv_names = {field.name for field in fields}
rec_vals = [
(data['record'], {
name: data['inversed'][name]
for name in inv_names
if name in data['inversed']
})
for data in data_list
if not inv_names.isdisjoint(data['inversed'])
]
# If a field is not stored, its inverse method will probably
# write on its dependencies, which will invalidate the field on
# all records. We therefore inverse the field record by record.
if all(field.store or field.company_dependent for field in fields):
batches = [rec_vals]
else:
batches = [[rec_data] for rec_data in rec_vals]
for batch in batches:
for record, vals in batch:
record._update_cache(vals)
batch_recs = self.concat(*(record for record, vals in batch))
fields[0].determine_inverse(batch_recs)
# check Python constraints for non-stored inversed fields
for data in data_list:
data['record']._validate_fields(data['inversed'], data['stored'])
if self._check_company_auto:
records._check_company()
return records
@api.model
def _create(self, data_list):
""" Create records from the stored field values in ``data_list``. """
assert data_list
cr = self.env.cr
quote = '"{}"'.format
# insert rows
ids = [] # ids of created records
other_fields = OrderedSet() # non-column fields
translated_fields = OrderedSet() # translated fields
for data in data_list:
# determine column values
stored = data['stored']
columns = [('id', "nextval(%s)", self._sequence)]
for name, val in sorted(stored.items()):
field = self._fields[name]
assert field.store
if field.column_type:
col_val = field.convert_to_column(val, self, stored)
columns.append((name, field.column_format, col_val))
if field.translate is True:
translated_fields.add(field)
else:
other_fields.add(field)
# Insert rows one by one
# - as records don't all specify the same columns, code building batch-insert query
# was very complex
# - and the gains were low, so not worth spending so much complexity
#
# It also seems that we have to be careful with INSERTs in batch, because they have the
# same problem as SELECTs:
# If we inject a lot of data in a single query, performance becomes pathological both
# in the SQL parser and in the execution of the query itself.
# In SELECT queries, we inject at most 1000 ids (integers) when we can, because we know
# that this limit is well managed by PostgreSQL.
# In INSERT queries, we inject integers (small) and larger data (TEXT blocks for
# example).
#
# The problem then becomes: how to "estimate" the right size of the batch to have
# good performance?
#
# This requires extensive testing, and it was preferred not to introduce INSERTs in
# batch, to avoid regressions as much as possible.
#
# That said, we haven't closed the door completely.
query = "INSERT INTO {} ({}) VALUES ({}) RETURNING id".format(
quote(self._table),
", ".join(quote(name) for name, fmt, val in columns),
", ".join(fmt for name, fmt, val in columns),
)
params = [val for name, fmt, val in columns]
cr.execute(query, params)
ids.append(cr.fetchone()[0])
# put the new records in cache, and update inverse fields, for many2one
#
# cachetoclear is an optimization to avoid modified()'s cost until other_fields are processed
cachetoclear = []
records = self.browse(ids)
inverses_update = defaultdict(list) # {(field, value): ids}
for data, record in zip(data_list, records):
data['record'] = record
# DLE P104: test_inherit.py, test_50_search_one2many
vals = dict({k: v for d in data['inherited'].values() for k, v in d.items()}, **data['stored'])
set_vals = list(vals) + LOG_ACCESS_COLUMNS + [self.CONCURRENCY_CHECK_FIELD, 'id', 'parent_path']
for field in self._fields.values():
if field.type in ('one2many', 'many2many'):
self.env.cache.set(record, field, ())
elif field.related and not field.column_type:
self.env.cache.set(record, field, field.convert_to_cache(None, record))
# DLE P123: `test_adv_activity`, `test_message_assignation_inbox`, `test_message_log`, `test_create_mail_simple`, ...
# Set `mail.message.parent_id` to False in cache so it doesn't do the useless SELECT when computing the modified of `child_ids`
# in other words, if `parent_id` is not set, no other message `child_ids` are impacted.
# + avoid fetching fields which are False. e.g. if a boolean field is not passed in vals and has no default set in the field attributes,
# then we know it can be set to False in the cache in the case of a create.
elif field.name not in set_vals and not field.compute:
self.env.cache.set(record, field, field.convert_to_cache(None, record))
for fname, value in vals.items():
field = self._fields[fname]
if field.type in ('one2many', 'many2many'):
cachetoclear.append((record, field))
else:
cache_value = field.convert_to_cache(value, record)
self.env.cache.set(record, field, cache_value)
if field.type in ('many2one', 'many2one_reference') and self.pool.field_inverses[field]:
inverses_update[(field, cache_value)].append(record.id)
for (field, value), record_ids in inverses_update.items():
field._update_inverses(self.browse(record_ids), value)
# update parent_path
records._parent_store_create()
# protect fields being written against recomputation
protected = [(data['protected'], data['record']) for data in data_list]
with self.env.protecting(protected):
# mark computed fields as todo
records.modified(self._fields, create=True)
if other_fields:
# discard default values from context for other fields
others = records.with_context(clean_context(self._context))
for field in sorted(other_fields, key=attrgetter('_sequence')):
field.create([
(other, data['stored'][field.name])
for other, data in zip(others, data_list)
if field.name in data['stored']
])
# mark fields to recompute
records.modified([field.name for field in other_fields], create=True)
# if value in cache has not been updated by other_fields, remove it
for record, field in cachetoclear:
if self.env.cache.contains(record, field) and not self.env.cache.get(record, field):
self.env.cache.remove(record, field)
# check Python constraints for stored fields
records._validate_fields(name for data in data_list for name in data['stored'])
records.check_access_rule('create')
# add translations
if self.env.lang and self.env.lang != 'en_US':
Translations = self.env['ir.translation']
for field in translated_fields:
tname = "%s,%s" % (field.model_name, field.name)
for data in data_list:
if field.name in data['stored']:
record = data['record']
val = data['stored'][field.name]
Translations._set_ids(tname, 'model', self.env.lang, record.ids, val, val)
return records
def _compute_field_value(self, field):
fields.determine(field.compute, self)
if field.store and any(self._ids):
# check constraints of the fields that have been computed
fnames = [f.name for f in self.pool.field_computed[field]]
self.filtered('id')._validate_fields(fnames)
def _parent_store_create(self):
""" Set the parent_path field on ``self`` after its creation. """
if not self._parent_store:
return
query = """
UPDATE {0} node
SET parent_path=concat((SELECT parent.parent_path FROM {0} parent
WHERE parent.id=node.{1}), node.id, '/')
WHERE node.id IN %s
""".format(self._table, self._parent_name)
self._cr.execute(query, [tuple(self.ids)])
def _parent_store_update_prepare(self, vals):
""" Return the records in ``self`` that must update their parent_path
field. This must be called before updating the parent field.
"""
if not self._parent_store or self._parent_name not in vals:
return self.browse()
# No need to recompute the values if the parent is the same.
parent_val = vals[self._parent_name]
if parent_val:
query = """ SELECT id FROM {0}
WHERE id IN %s AND ({1} != %s OR {1} IS NULL) """
params = [tuple(self.ids), parent_val]
else:
query = """ SELECT id FROM {0}
WHERE id IN %s AND {1} IS NOT NULL """
params = [tuple(self.ids)]
query = query.format(self._table, self._parent_name)
self._cr.execute(query, params)
return self.browse([row[0] for row in self._cr.fetchall()])
def _parent_store_update(self):
""" Update the parent_path field of ``self``. """
cr = self.env.cr
# determine new prefix of parent_path
query = """
SELECT parent.parent_path FROM {0} node, {0} parent
WHERE node.id = %s AND parent.id = node.{1}
"""
cr.execute(query.format(self._table, self._parent_name), [self.ids[0]])
prefix = cr.fetchone()[0] if cr.rowcount else ''
# check for recursion
if prefix:
parent_ids = {int(label) for label in prefix.split('/')[:-1]}
if not parent_ids.isdisjoint(self._ids):
raise UserError(_("Recursion Detected."))
# update parent_path of all records and their descendants
query = """
UPDATE {0} child
SET parent_path = concat(%s, substr(child.parent_path,
length(node.parent_path) - length(node.id || '/') + 1))
FROM {0} node
WHERE node.id IN %s
AND child.parent_path LIKE concat(node.parent_path, '%%')
RETURNING child.id, child.parent_path
"""
cr.execute(query.format(self._table), [prefix, tuple(self.ids)])
# update the cache of updated nodes, and determine what to recompute
updated = dict(cr.fetchall())
records = self.browse(updated)
self.env.cache.update(records, self._fields['parent_path'], updated.values())
records.modified(['parent_path'])
def _load_records_write(self, values):
self.write(values)
def _load_records_create(self, values):
return self.create(values)
def _load_records(self, data_list, update=False):
""" Create or update records of this model, and assign XMLIDs.
:param data_list: list of dicts with keys `xml_id` (XMLID to
assign), `noupdate` (flag on XMLID), `values` (field values)
:param update: should be ``True`` when upgrading a module
:return: the records corresponding to ``data_list``
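Each item of ``data_list`` is a dict shaped as follows (values are
illustrative)::

    {'xml_id': 'my_module.partner_agrolait',
     'values': {'name': 'Agrolait'},
     'noupdate': False}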
"""
original_self = self.browse()
# records created during installation should not display messages
self = self.with_context(install_mode=True)
imd = self.env['ir.model.data'].sudo()
# The algorithm below partitions 'data_list' into three sets: the ones
# to create, the ones to update, and the others. For each set, we assign
# data['record'] for each data. All those records are then retrieved for
# the result.
# determine existing xml_ids
xml_ids = [data['xml_id'] for data in data_list if data.get('xml_id')]
existing = {
("%s.%s" % row[1:3]): row
for row in imd._lookup_xmlids(xml_ids, self)
}
# determine which records to create and update
to_create = [] # list of data
to_update = [] # list of data
imd_data_list = [] # list of data for _update_xmlids()
for data in data_list:
xml_id = data.get('xml_id')
if not xml_id:
vals = data['values']
if vals.get('id'):
data['record'] = self.browse(vals['id'])
to_update.append(data)
elif not update:
to_create.append(data)
continue
row = existing.get(xml_id)
if not row:
to_create.append(data)
continue
d_id, d_module, d_name, d_model, d_res_id, d_noupdate, r_id = row
if self._name != d_model:
_logger.warning((
"For external id %s "
"when trying to create/update a record of model %s "
"found record of different model %s (%s)"
"\nUpdating record %s of target model %s"),
xml_id, self._name, d_model, d_id, d_id, self._name
)
raise ValidationError(
f"For external id {xml_id} "
f"when trying to create/update a record of model {self._name} "
f"found record of different model {d_model} ({d_id})"
)
record = self.browse(d_res_id)
if r_id:
data['record'] = record
imd_data_list.append(data)
if not (update and d_noupdate):
to_update.append(data)
else:
imd.browse(d_id).unlink()
to_create.append(data)
# update existing records
for data in to_update:
data['record']._load_records_write(data['values'])
# check for records to create with an XMLID from another module
module = self.env.context.get('install_module')
if module:
prefix = module + "."
for data in to_create:
if data.get('xml_id') and not data['xml_id'].startswith(prefix):
_logger.warning("Creating record %s in module %s.", data['xml_id'], module)
# create records
records = self._load_records_create([data['values'] for data in to_create])
for data, record in zip(to_create, records):
data['record'] = record
if data.get('xml_id'):
# add XML ids for parent records that have just been created
for parent_model, parent_field in self._inherits.items():
if not data['values'].get(parent_field):
imd_data_list.append({
'xml_id': f"{data['xml_id']}_{parent_model.replace('.', '_')}",
'record': record[parent_field],
'noupdate': data.get('noupdate', False),
})
imd_data_list.append(data)
# create or update XMLIDs
imd._update_xmlids(imd_data_list, update)
return original_self.concat(*(data['record'] for data in data_list))
# TODO: improve handling of NULL
@api.model
def _where_calc(self, domain, active_test=True):
"""Computes the WHERE clause needed to implement an OpenERP domain.
:param domain: the domain to compute
:type domain: list
:param active_test: whether the default filtering of records with ``active``
field set to ``False`` should be applied.
:return: the query expressing the given domain as provided in domain
:rtype: osv.query.Query
"""
# if the object has an active field ('active', 'x_active'), filter out all
# inactive records unless they were explicitly asked for
if self._active_name and active_test and self._context.get('active_test', True):
# the item[0] trick below works for domain items and '&'/'|'/'!'
# operators too
if not any(item[0] == self._active_name for item in domain):
domain = [(self._active_name, '=', 1)] + domain
if domain:
return expression.expression(domain, self).query
else:
return Query(self.env.cr, self._table, self._table_query)
def _check_qorder(self, word):
if not regex_order.match(word):
raise UserError(_(
'Invalid "order" specified (%s). A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)',
word,
))
return True
@api.model
def _apply_ir_rules(self, query, mode='read'):
"""Add what's missing in ``query`` to implement all appropriate ir.rules
(using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
:param query: the current query object
"""
if self.env.su:
return
# apply main rules on the object
Rule = self.env['ir.rule']
domain = Rule._compute_domain(self._name, mode)
if domain:
expression.expression(domain, self.sudo(), self._table, query)
# apply ir.rules from the parents (through _inherits)
for parent_model_name in self._inherits:
domain = Rule._compute_domain(parent_model_name, mode)
if domain:
parent_model = self.env[parent_model_name]
parent_alias = self._inherits_join_add(self, parent_model_name, query)
expression.expression(domain, parent_model.sudo(), parent_alias, query)
@api.model
def _generate_translated_field(self, table_alias, field, query):
"""
Add possibly missing JOIN with translations table to ``query`` and
generate the expression for the translated field.
:return: the qualified field name (or expression) to use for ``field``
"""
if self.env.lang:
alias = query.left_join(
table_alias, 'id', 'ir_translation', 'res_id', field,
extra='"{rhs}"."type" = \'model\' AND "{rhs}"."name" = %s AND "{rhs}"."lang" = %s AND "{rhs}"."value" != %s',
extra_params=["%s,%s" % (self._name, field), self.env.lang, ""],
)
return 'COALESCE("%s"."%s", "%s"."%s")' % (alias, 'value', table_alias, field)
else:
return '"%s"."%s"' % (table_alias, field)
@api.model
def _generate_m2o_order_by(self, alias, order_field, query, reverse_direction, seen):
"""
Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
either native m2o fields or function/related fields that are stored, including
intermediate JOINs for inheritance if required.
:return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
"""
field = self._fields[order_field]
if field.inherited:
# also add missing joins for reaching the table containing the m2o field
qualified_field = self._inherits_join_calc(alias, order_field, query)
alias, order_field = qualified_field.replace('"', '').split('.', 1)
field = field.base_field
assert field.type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
if not field.store:
_logger.debug("Many2one function/related fields must be stored "
"to be used as ordering fields! Ignoring sorting for %s.%s",
self._name, order_field)
return []
# figure out the applicable order_by for the m2o
dest_model = self.env[field.comodel_name]
m2o_order = dest_model._order
if not regex_order.match(m2o_order):
# _order is complex, can't use it here, so we default to _rec_name
m2o_order = dest_model._rec_name
# Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
# as we don't want to exclude results that have NULL values for the m2o
dest_alias = query.left_join(alias, order_field, dest_model._table, 'id', order_field)
return dest_model._generate_order_by_inner(dest_alias, m2o_order, query,
reverse_direction, seen)
@api.model
def _generate_order_by_inner(self, alias, order_spec, query, reverse_direction=False, seen=None):
if seen is None:
seen = set()
self._check_qorder(order_spec)
order_by_elements = []
for order_part in order_spec.split(','):
order_split = order_part.strip().split(' ')
order_field = order_split[0].strip()
order_direction = order_split[1].strip().upper() if len(order_split) == 2 else ''
if reverse_direction:
order_direction = 'ASC' if order_direction == 'DESC' else 'DESC'
do_reverse = order_direction == 'DESC'
field = self._fields.get(order_field)
if not field:
raise ValueError("Invalid field %r on model %r" % (order_field, self._name))
if order_field == 'id':
order_by_elements.append('"%s"."%s" %s' % (alias, order_field, order_direction))
else:
if field.inherited:
field = field.base_field
if field.store and field.type == 'many2one':
key = (field.model_name, field.comodel_name, order_field)
if key not in seen:
seen.add(key)
order_by_elements += self._generate_m2o_order_by(alias, order_field, query, do_reverse, seen)
elif field.store and field.column_type:
qualifield_name = self._inherits_join_calc(alias, order_field, query)
if field.type == 'boolean':
qualifield_name = "COALESCE(%s, false)" % qualifield_name
order_by_elements.append("%s %s" % (qualifield_name, order_direction))
else:
_logger.warning("Model %r cannot be sorted on field %r (not a column)", self._name, order_field)
continue # ignore non-readable or "non-joinable" fields
return order_by_elements
@api.model
def _generate_order_by(self, order_spec, query):
"""
Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
:raise ValueError in case order_spec is malformed
"""
order_by_clause = ''
order_spec = order_spec or self._order
if order_spec:
order_by_elements = self._generate_order_by_inner(self._table, order_spec, query)
if order_by_elements:
order_by_clause = ",".join(order_by_elements)
return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
@api.model
def _flush_search(self, domain, fields=None, order=None, seen=None):
""" Flush all the fields appearing in `domain`, `fields` and `order`. """
if seen is None:
seen = set()
elif self._name in seen:
return
seen.add(self._name)
to_flush = defaultdict(set) # {model_name: field_names}
if fields:
to_flush[self._name].update(fields)
# also take into account the fields in the record rules
domain = list(domain) + (self.env['ir.rule']._compute_domain(self._name, 'read') or [])
for arg in domain:
if isinstance(arg, str):
continue
if not isinstance(arg[0], str):
continue
model_name = self._name
for fname in arg[0].split('.'):
field = self.env[model_name]._fields.get(fname)
if not field:
break
to_flush[model_name].add(fname)
# DLE P111: `test_message_process_email_partner_find`
# Search on res.users with email_normalized in domain
# must trigger the recompute and flush of res.partner.email_normalized
if field.related_field:
model = self
# DLE P129: `test_transit_multi_companies`
# `self.env['stock.picking'].search([('product_id', '=', product.id)])`
# Should flush `stock.move.picking_ids` as `product_id` on `stock.picking` is defined as:
# `product_id = fields.Many2one('product.product', 'Product', related='move_lines.product_id', readonly=False)`
for f in field.related.split('.'):
rfield = model._fields.get(f)
if rfield:
to_flush[model._name].add(f)
if rfield.type in ('many2one', 'one2many', 'many2many'):
model = self.env[rfield.comodel_name]
if rfield.type == 'one2many' and rfield.inverse_name:
to_flush[rfield.comodel_name].add(rfield.inverse_name)
if field.comodel_name:
model_name = field.comodel_name
# hierarchy operators need the parent field
if arg[1] in ('child_of', 'parent_of'):
model = self.env[model_name]
if model._parent_store:
to_flush[model_name].add(model._parent_name)
# flush the order fields
order_spec = order or self._order
for order_part in order_spec.split(','):
order_field = order_part.split()[0]
field = self._fields.get(order_field)
if field is not None:
to_flush[self._name].add(order_field)
if field.relational:
self.env[field.comodel_name]._flush_search([], seen=seen)
if self._active_name:
to_flush[self._name].add(self._active_name)
# flush model dependencies (recursively)
if self._depends:
models = [self]
while models:
model = models.pop()
for model_name, field_names in model._depends.items():
to_flush[model_name].update(field_names)
models.append(self.env[model_name])
for model_name, field_names in to_flush.items():
self.env[model_name].flush(field_names)
@api.model
def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
"""
Private implementation of the search() method, which allows specifying the uid to use for the access right check.
This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
This is ok at the security level because this method is private and not callable through XML-RPC.
:param access_rights_uid: optional user ID to use when checking access rights
(not for ir.rules, this is only for ir.model.access)
:return: a list of record ids or an integer (if count is True)
"""
model = self.with_user(access_rights_uid) if access_rights_uid else self
model.check_access_rights('read')
if expression.is_false(self, args):
# optimization: no need to query, as no record satisfies the domain
return 0 if count else []
# the flush must be done before the _where_calc(), as the latter can do some selects
self._flush_search(args, order=order)
query = self._where_calc(args)
self._apply_ir_rules(query, 'read')
if count:
# Ignore order, limit and offset when just counting, they don't make sense and could
# hurt performance
query_str, params = query.select("count(1)")
self._cr.execute(query_str, params)
res = self._cr.fetchone()
return res[0]
query.order = self._generate_order_by(order, query).replace('ORDER BY ', '')
query.limit = limit
query.offset = offset
return query
@api.returns(None, lambda value: value[0])
def copy_data(self, default=None):
"""
Copy given record's data with all its fields values
:param default: field values to override in the original values of the copied record
:return: list with a dictionary containing all the field values
"""
# In the old API, this method took a single id and returned a dict. When
# invoked with the new API, it returns a list of dicts.
self.ensure_one()
# avoid recursion through already copied records in case of circular relationship
if '__copy_data_seen' not in self._context:
self = self.with_context(__copy_data_seen=defaultdict(set))
seen_map = self._context['__copy_data_seen']
if self.id in seen_map[self._name]:
return
seen_map[self._name].add(self.id)
default = dict(default or [])
# build a black list of fields that should not be copied
blacklist = set(MAGIC_COLUMNS + ['parent_path'])
whitelist = set(name for name, field in self._fields.items() if not field.inherited)
def blacklist_given_fields(model):
# blacklist the fields that are given by inheritance
for parent_model, parent_field in model._inherits.items():
blacklist.add(parent_field)
if parent_field in default:
# all the fields of 'parent_model' are given by the record:
# default[parent_field], except the ones redefined in self
blacklist.update(set(self.env[parent_model]._fields) - whitelist)
else:
blacklist_given_fields(self.env[parent_model])
# blacklist deprecated fields
for name, field in model._fields.items():
if field.deprecated:
blacklist.add(name)
blacklist_given_fields(self)
fields_to_copy = {name: field
for name, field in self._fields.items()
if field.copy and name not in default and name not in blacklist}
for name, field in fields_to_copy.items():
if field.type == 'one2many':
# duplicate following the order of the ids because we'll rely on
# it later for copying translations in copy_translations()!
lines = [rec.copy_data()[0] for rec in self[name].sorted(key='id')]
# the lines are duplicated using the wrong (old) parent, but then are
# reassigned to the correct one thanks to the (Command.CREATE, 0, ...)
default[name] = [Command.create(line) for line in lines if line]
elif field.type == 'many2many':
default[name] = [Command.set(self[name].ids)]
else:
default[name] = field.convert_to_write(self[name], self)
return [default]
def copy_translations(old, new, excluded=()):
""" Recursively copy the translations from original to new record
:param old: the original record
:param new: the new record (copy of the original one)
:param excluded: a container of user-provided field names
"""
# avoid recursion through already copied records in case of circular relationship
if '__copy_translations_seen' not in old._context:
old = old.with_context(__copy_translations_seen=defaultdict(set))
seen_map = old._context['__copy_translations_seen']
if old.id in seen_map[old._name]:
return
seen_map[old._name].add(old.id)
def get_trans(field, old, new):
""" Return the 'name' of the translations to search for, together
with the record ids corresponding to ``old`` and ``new``.
"""
if field.inherited:
pname = field.related.split('.')[0]
return get_trans(field.related_field, old[pname], new[pname])
return "%s,%s" % (field.model_name, field.name), old.id, new.id
# removing the lang to compare untranslated values
old_wo_lang, new_wo_lang = (old + new).with_context(lang=None)
Translation = old.env['ir.translation']
for name, field in old._fields.items():
if not field.copy:
continue
if field.inherited and field.related.split('.')[0] in excluded:
# inherited fields that come from a user-provided parent record
# must not copy translations, as the parent record is not a copy
# of the old parent record
continue
if field.type == 'one2many' and field.name not in excluded:
# we must recursively copy the translations for o2m; here we
# rely on the order of the ids to match the translations as
# foreseen in copy_data()
old_lines = old[name].sorted(key='id')
new_lines = new[name].sorted(key='id')
for (old_line, new_line) in zip(old_lines, new_lines):
# don't pass excluded as it is not about those lines
old_line.copy_translations(new_line)
elif field.translate:
# for translatable fields we copy their translations
trans_name, source_id, target_id = get_trans(field, old, new)
domain = [('name', '=', trans_name), ('res_id', '=', source_id)]
new_val = new_wo_lang[name]
if old.env.lang and callable(field.translate):
# the new value *without lang* must be the old value without lang
new_wo_lang[name] = old_wo_lang[name]
vals_list = []
for vals in Translation.search_read(domain):
del vals['id']
del vals['module'] # duplicated vals is not linked to any module
vals['res_id'] = target_id
if not callable(field.translate):
vals['src'] = new_wo_lang[name]
if vals['lang'] == old.env.lang and field.translate is True:
# update master record if the new_val was not changed by copy override
if new_val == old[name]:
new_wo_lang[name] = old_wo_lang[name]
vals['src'] = old_wo_lang[name]
# the value should be the new value (given by copy())
vals['value'] = new_val
vals_list.append(vals)
Translation._upsert_translations(vals_list)
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
""" copy(default=None)
Duplicate record ``self`` updating it with default values
:param dict default: dictionary of field values to override in the
original values of the copied record, e.g: ``{'field_name': overridden_value, ...}``
:returns: new record
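Example, as an illustrative sketch (the ``name`` field is only an example)::

    duplicate = record.copy({'name': record.name + " (copy)"})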
"""
self.ensure_one()
vals = self.with_context(active_test=False).copy_data(default)[0]
# Avoid creating a translation in the user's language here; copy_translations() will take care of it
new = self.with_context(lang=None).create(vals).with_env(self.env)
self.with_context(from_copy_translation=True).copy_translations(new, excluded=default or ())
return new
@api.returns('self')
def exists(self):
""" exists() -> records
Returns the subset of records in ``self`` that exist, and marks deleted
records as such in cache. It can be used as a test on records::
if record.exists():
...
By convention, new records are returned as existing.
"""
new_ids, ids = partition(lambda i: isinstance(i, NewId), self._ids)
if not ids:
return self
query = Query(self.env.cr, self._table, self._table_query)
query.add_where(f'"{self._table}".id IN %s', [tuple(ids)])
query_str, params = query.select()
self.env.cr.execute(query_str, params)
valid_ids = set([r[0] for r in self._cr.fetchall()] + new_ids)
return self.browse(i for i in self._ids if i in valid_ids)
def _check_recursion(self, parent=None):
"""
Verifies that there is no loop in a hierarchical structure of records,
by following the parent relationship using the **parent** field until a
loop is detected or until a top-level record is found.
:param parent: optional parent field name (default: ``self._parent_name``)
:return: **True** if no loop was found, **False** otherwise.
"""
if not parent:
parent = self._parent_name
# must ignore 'active' flag, ir.rules, etc. => direct SQL query
cr = self._cr
self.flush([parent])
query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
for id in self.ids:
current_id = id
while current_id:
cr.execute(query, (current_id,))
result = cr.fetchone()
current_id = result[0] if result else None
if current_id == id:
return False
return True
def _check_m2m_recursion(self, field_name):
"""
Verifies that there is no loop in a directed graph of records, by
following a many2many relationship with the given field name.
:param field_name: field to check
:return: **True** if no loop was found, **False** otherwise.
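Typical usage, sketched with an illustrative field name and message::

    @api.constrains('related_ids')
    def _check_related_ids_recursion(self):
        if not self._check_m2m_recursion('related_ids'):
            raise ValidationError(_("Recursive relations are not allowed."))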
"""
field = self._fields.get(field_name)
if not (field and field.type == 'many2many' and
field.comodel_name == self._name and field.store):
# field must be a many2many on itself
raise ValueError('invalid field_name: %r' % (field_name,))
self.flush([field_name])
cr = self._cr
query = 'SELECT "%s", "%s" FROM "%s" WHERE "%s" IN %%s AND "%s" IS NOT NULL' % \
(field.column1, field.column2, field.relation, field.column1, field.column2)
succs = defaultdict(set) # transitive closure of successors
preds = defaultdict(set) # transitive closure of predecessors
todo, done = set(self.ids), set()
while todo:
# retrieve the respective successors of the nodes in 'todo'
cr.execute(query, [tuple(todo)])
done.update(todo)
todo.clear()
for id1, id2 in cr.fetchall():
# connect id1 and its predecessors to id2 and its successors
for x, y in itertools.product([id1] + list(preds[id1]),
[id2] + list(succs[id2])):
if x == y:
return False # we found a cycle here!
succs[x].add(y)
preds[y].add(x)
if id2 not in done:
todo.add(id2)
return True
def _get_external_ids(self):
"""Retrieve the External ID(s) of any database record.
**Synopsis**: ``_get_external_ids() -> { 'id': ['module.external_id'] }``
:return: map of ids to the list of their fully qualified External IDs
in the form ``module.key``, or an empty list when there's no External
ID for a record, e.g.::
{ 'id': ['module.ext_id', 'module.ext_id_bis'],
'id2': [] }
"""
result = defaultdict(list)
domain = [('model', '=', self._name), ('res_id', 'in', self.ids)]
for data in self.env['ir.model.data'].sudo().search_read(domain, ['module', 'name', 'res_id'], order='id'):
result[data['res_id']].append('%(module)s.%(name)s' % data)
return {
record.id: result[record._origin.id]
for record in self
}
def get_external_id(self):
"""Retrieve the External ID of any database record, if there
is one. This method works as a possible implementation
for a function field, to be able to add it to any
model object easily, referencing it as ``Model.get_external_id``.
When multiple External IDs exist for a record, only one
of them is returned (randomly).
:return: map of ids to their fully qualified XML ID,
defaulting to an empty string when there's none
(to be usable as a function field),
e.g.::
{ 'id': 'module.ext_id',
'id2': '' }
"""
results = self._get_external_ids()
return {key: val[0] if val else ''
for key, val in results.items()}
# backwards compatibility
get_xml_id = get_external_id
_get_xml_ids = _get_external_ids
# Transience
@classmethod
def is_transient(cls):
""" Return whether the model is transient.
See :class:`TransientModel`.
"""
return cls._transient
@api.model
def search_read(self, domain=None, fields=None, offset=0, limit=None, order=None, **read_kwargs):
"""Perform a :meth:`search` followed by a :meth:`read`.
:param domain: Search domain, see ``args`` parameter in :meth:`search`.
Defaults to an empty domain that will match all records.
:param fields: List of fields to read, see ``fields`` parameter in :meth:`read`.
Defaults to all fields.
:param int offset: Number of records to skip, see ``offset`` parameter in :meth:`search`.
Defaults to 0.
:param int limit: Maximum number of records to return, see ``limit`` parameter in :meth:`search`.
Defaults to no limit.
:param order: Columns to sort result, see ``order`` parameter in :meth:`search`.
Defaults to no sort.
:param read_kwargs: All read keywords arguments used to call read(..., **read_kwargs) method
E.g. you can use search_read(..., load='') in order to avoid computing name_get
:return: List of dictionaries containing the asked fields.
:rtype: list(dict).
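For example, an illustrative call (model, domain and fields are examples)::

    self.env['res.partner'].search_read(
        domain=[('is_company', '=', True)],
        fields=['name', 'country_id'],
        limit=5,
    )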
"""
records = self.search(domain or [], offset=offset, limit=limit, order=order)
if not records:
return []
if fields and fields == ['id']:
# shortcut read if we only want the ids
return [{'id': record.id} for record in records]
# read() ignores active_test, but it would forward it to any downstream search call
# (e.g. for x2m or function fields), and this is not the desired behavior; the flag
# was presumably only meant for the main search().
# TODO: Move this to read() directly?
if 'active_test' in self._context:
context = dict(self._context)
del context['active_test']
records = records.with_context(context)
result = records.read(fields, **read_kwargs)
if len(result) <= 1:
return result
# reorder read
index = {vals['id']: vals for vals in result}
return [index[record.id] for record in records if record.id in index]
def toggle_active(self):
""" Inverse the value of the field ``(x_)active`` on the records in ``self``. """
active_recs = self.filtered(self._active_name)
active_recs[self._active_name] = False
(self - active_recs)[self._active_name] = True
def action_archive(self):
""" Set (x_)active=False on a recordset, by calling toggle_active to
take the corresponding actions according to the model
"""
return self.filtered(lambda record: record[self._active_name]).toggle_active()
def action_unarchive(self):
""" Set (x_)active=True on a recordset, by calling toggle_active to
take the corresponding actions according to the model
"""
return self.filtered(lambda record: not record[self._active_name]).toggle_active()
def _register_hook(self):
""" stuff to do right after the registry is built """
pass
def _unregister_hook(self):
""" Clean up what `~._register_hook` has done. """
pass
@classmethod
def _patch_method(cls, name, method):
""" Monkey-patch a method for all instances of this model. This replaces
the method called ``name`` by ``method`` in the given class.
The original method is then accessible via ``method.origin``, and it
can be restored with :meth:`~._revert_method`.
Example::
def do_write(self, values):
# do stuff, and call the original method
return do_write.origin(self, values)
# patch method write of model
model._patch_method('write', do_write)
# this will call do_write
records = model.search([...])
records.write(...)
# restore the original method
model._revert_method('write')
"""
origin = getattr(cls, name)
method.origin = origin
# propagate decorators from origin to method, and apply api decorator
wrapped = api.propagate(origin, method)
wrapped.origin = origin
setattr(cls, name, wrapped)
@classmethod
def _revert_method(cls, name):
""" Revert the original method called ``name`` in the given class.
See :meth:`~._patch_method`.
"""
method = getattr(cls, name)
setattr(cls, name, method.origin)
#
# Instance creation
#
# An instance represents an ordered collection of records in a given
# execution environment. The instance object refers to the environment, and
# the records themselves are represented by their cache dictionary. The 'id'
# of each record is found in its corresponding cache dictionary.
#
# This design has the following advantages:
# - cache access is direct and thus fast;
# - one can consider records without an 'id' (see new records);
# - the global cache is only an index to "resolve" a record 'id'.
#
@classmethod
def _browse(cls, env, ids, prefetch_ids):
""" Create a recordset instance.
:param env: an environment
:param ids: a tuple of record ids
:param prefetch_ids: a collection of record ids (for prefetching)
"""
records = object.__new__(cls)
records.env = env
records._ids = ids
records._prefetch_ids = prefetch_ids
return records
def browse(self, ids=None):
""" browse([ids]) -> records
Returns a recordset for the ids provided as parameter in the current
environment.
.. code-block:: python
self.browse([7, 18, 12])
res.partner(7, 18, 12)
:param ids: id(s)
:type ids: int or list(int) or None
:return: recordset
"""
if not ids:
ids = ()
elif ids.__class__ in IdType:
ids = (ids,)
else:
ids = tuple(ids)
return self._browse(self.env, ids, ids)
#
# Internal properties, for manipulating the instance's implementation
#
@property
def ids(self):
""" Return the list of actual record ids corresponding to ``self``. """
return list(origin_ids(self._ids))
# backward-compatibility with former browse records
_cr = property(lambda self: self.env.cr)
_uid = property(lambda self: self.env.uid)
_context = property(lambda self: self.env.context)
#
# Conversion methods
#
def ensure_one(self):
"""Verify that the current recorset holds a single record.
:raise odoo.exceptions.ValueError: ``len(self) != 1``
"""
try:
# unpacking to ensure there is only one value is faster than len() when true, and
# has a significant impact as this check is called very often
_id, = self._ids
return self
except ValueError:
raise ValueError("Expected singleton: %s" % self)
def with_env(self, env):
"""Return a new version of this recordset attached to the provided environment.
:param env:
:type env: :class:`~odoo.api.Environment`
.. warning::
The new environment will not benefit from the current
environment's data cache, so later data access may incur extra
delays while re-fetching from the database.
The returned recordset has the same prefetch object as ``self``.
"""
return self._browse(env, self._ids, self._prefetch_ids)
def sudo(self, flag=True):
""" sudo([flag=True])
Returns a new version of this recordset with superuser mode enabled or
disabled, depending on `flag`. The superuser mode does not change the
current user, and simply bypasses access rights checks.
.. warning::
Using ``sudo`` could cause data access to cross the
boundaries of record rules, possibly mixing records that
are meant to be isolated (e.g. records from different
companies in multi-company environments).
It may lead to un-intuitive results in methods which select one
record among many - for example getting the default company, or
selecting a Bill of Materials.
.. note::
Because the record rules and access control will have to be
re-evaluated, the new recordset will not benefit from the current
environment's data cache, so later data access may incur extra
delays while re-fetching from the database.
The returned recordset has the same prefetch object as ``self``.
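Example (``partner_id`` is illustrative)::

    # perform the read with access checks bypassed
    partner_name = self.env['res.partner'].sudo().browse(partner_id).name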
"""
if not isinstance(flag, bool):
_logger.warning("deprecated use of sudo(user), use with_user(user) instead", stack_info=True)
return self.with_user(flag)
return self.with_env(self.env(su=flag))
def with_user(self, user):
""" with_user(user)
Return a new version of this recordset attached to the given user, in
non-superuser mode, unless `user` is the superuser (by convention, the
superuser is always in superuser mode.)
"""
if not user:
return self
return self.with_env(self.env(user=user, su=False))
def with_company(self, company):
""" with_company(company)
Return a new version of this recordset with a modified context, such that::
result.env.company = company
result.env.companies = self.env.companies | company
:param company: main company of the new environment.
:type company: :class:`~odoo.addons.base.models.res_company` or int
.. warning::
When using an unauthorized company for current user,
accessing the company(ies) on the environment may trigger
an AccessError if not done in a sudoed environment.
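Example (``branch_company`` is illustrative)::

    order_in_branch = order.with_company(branch_company)
    # order_in_branch.env.company is now branch_company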
"""
if not company:
# With company = None/False/0/[]/empty recordset: keep current environment
return self
company_id = int(company)
allowed_company_ids = self.env.context.get('allowed_company_ids', [])
if allowed_company_ids and company_id == allowed_company_ids[0]:
return self
# Copy the allowed_company_ids list
# to avoid modifying the context of the current environment.
allowed_company_ids = list(allowed_company_ids)
if company_id in allowed_company_ids:
allowed_company_ids.remove(company_id)
allowed_company_ids.insert(0, company_id)
return self.with_context(allowed_company_ids=allowed_company_ids)
def with_context(self, *args, **kwargs):
""" with_context([context][, **overrides]) -> records
Returns a new version of this recordset attached to an extended
context.
The extended context is either the provided ``context`` in which
``overrides`` are merged or the *current* context in which
``overrides`` are merged e.g.::
# current context is {'key1': True}
r2 = records.with_context({}, key2=True)
# -> r2._context is {'key2': True}
r2 = records.with_context(key2=True)
# -> r2._context is {'key1': True, 'key2': True}
.. note::
The returned recordset has the same prefetch object as ``self``.
"""
if (args and 'force_company' in args[0]) or 'force_company' in kwargs:
_logger.warning(
"Context key 'force_company' is no longer supported. "
"Use with_company(company) instead.",
stack_info=True,
)
if (args and 'company' in args[0]) or 'company' in kwargs:
_logger.warning(
"Context key 'company' is not recommended, because "
"of its special meaning in @depends_context.",
stack_info=True,
)
context = dict(args[0] if args else self._context, **kwargs)
if 'allowed_company_ids' not in context and 'allowed_company_ids' in self._context:
# Force 'allowed_company_ids' to be kept when context is overridden
# without 'allowed_company_ids'
context['allowed_company_ids'] = self._context['allowed_company_ids']
return self.with_env(self.env(context=context))
def with_prefetch(self, prefetch_ids=None):
""" with_prefetch([prefetch_ids]) -> records
Return a new version of this recordset that uses the given prefetch ids,
or ``self``'s ids if not given.
"""
if prefetch_ids is None:
prefetch_ids = self._ids
return self._browse(self.env, self._ids, prefetch_ids)
def _update_cache(self, values, validate=True):
""" Update the cache of ``self`` with ``values``.
:param values: dict of field values, in any format.
:param validate: whether values must be checked
"""
self.ensure_one()
cache = self.env.cache
fields = self._fields
try:
field_values = [(fields[name], value) for name, value in values.items()]
except KeyError as e:
raise ValueError("Invalid field %r on model %r" % (e.args[0], self._name))
# convert monetary fields after other columns for correct value rounding
for field, value in sorted(field_values, key=lambda item: item[0].write_sequence):
cache.set(self, field, field.convert_to_cache(value, self, validate))
# set inverse fields on new records in the comodel
if field.relational:
inv_recs = self[field.name].filtered(lambda r: not r.id)
if not inv_recs:
continue
for invf in self.pool.field_inverses[field]:
# DLE P98: `test_40_new_fields`
# /home/dle/src/odoo/master-nochange-fp/odoo/addons/test_new_api/tests/test_new_fields.py
# Be careful to not break `test_onchange_taxes_1`, `test_onchange_taxes_2`, `test_onchange_taxes_3`
# If you attempt to find a better solution
for inv_rec in inv_recs:
if not cache.contains(inv_rec, invf):
val = invf.convert_to_cache(self, inv_rec, validate=False)
cache.set(inv_rec, invf, val)
else:
invf._update(inv_rec, self)
def _convert_to_record(self, values):
""" Convert the ``values`` dictionary from the cache format to the
record format.
"""
return {
name: self._fields[name].convert_to_record(value, self)
for name, value in values.items()
}
def _convert_to_write(self, values):
""" Convert the ``values`` dictionary into the format of :meth:`write`. """
fields = self._fields
result = {}
for name, value in values.items():
if name in fields:
field = fields[name]
value = field.convert_to_write(value, self)
if not isinstance(value, NewId):
result[name] = value
return result
#
# Record traversal and update
#
def _mapped_func(self, func):
""" Apply function ``func`` on all records in ``self``, and return the
result as a list or a recordset (if ``func`` returns recordsets).
"""
if self:
vals = [func(rec) for rec in self]
if isinstance(vals[0], BaseModel):
return vals[0].union(*vals) # union of all recordsets
return vals
else:
vals = func(self)
return vals if isinstance(vals, BaseModel) else []
def mapped(self, func):
"""Apply ``func`` on all records in ``self``, and return the result as a
list or a recordset (if ``func`` returns recordsets). In the latter
case, the order of the returned recordset is arbitrary.
:param func: a function or a dot-separated sequence of field names
:type func: callable or str
:return: self if func is falsy, result of func applied to all ``self`` records.
:rtype: list or recordset
.. code-block:: python3
# returns a list of summing two fields for each record in the set
records.mapped(lambda r: r.field1 + r.field2)
The provided function can be a string to get field values:
.. code-block:: python3
# returns a list of names
records.mapped('name')
# returns a recordset of partners
records.mapped('partner_id')
# returns the union of all partner banks, with duplicates removed
records.mapped('partner_id.bank_ids')
"""
if not func:
return self # support for an empty path of fields
if isinstance(func, str):
recs = self
for name in func.split('.'):
recs = recs._fields[name].mapped(recs)
return recs
else:
return self._mapped_func(func)
def _mapped_cache(self, name_seq):
""" Same as `~.mapped`, but ``name_seq`` is a dot-separated sequence of
field names, and only cached values are used.
"""
recs = self
for name in name_seq.split('.'):
field = recs._fields[name]
null = field.convert_to_cache(False, self, validate=False)
if recs:
recs = recs.mapped(lambda rec: field.convert_to_record(rec._cache.get(name, null), rec))
else:
recs = field.convert_to_record(null, recs)
return recs
def filtered(self, func):
"""Return the records in ``self`` satisfying ``func``.
:param func: a function or a dot-separated sequence of field names
:type func: callable or str
:return: recordset of records satisfying func, may be empty.
.. code-block:: python3
# only keep records whose company is the current user's
records.filtered(lambda r: r.company_id == user.company_id)
# only keep records whose partner is a company
records.filtered("partner_id.is_company")
"""
if isinstance(func, str):
name = func
func = lambda rec: any(rec.mapped(name))
# populate cache
self.mapped(name)
return self.browse([rec.id for rec in self if func(rec)])
def filtered_domain(self, domain):
if not domain: return self
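# In-memory evaluation of ``domain`` against the records in ``self``: the
# domain is scanned in reverse, each leaf pushes the matching subset of
# ``self`` on ``result``, the operators '!', '&', '|' pop and combine those
# subsets, and any remaining subsets are AND-ed together at the end (the
# implicit '&' of domains).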
result = []
for d in reversed(domain):
if d == '|':
result.append(result.pop() | result.pop())
elif d == '!':
result.append(self - result.pop())
elif d == '&':
result.append(result.pop() & result.pop())
elif d == expression.TRUE_LEAF:
result.append(self)
elif d == expression.FALSE_LEAF:
result.append(self.browse())
else:
(key, comparator, value) = d
if comparator in ('child_of', 'parent_of'):
result.append(self.search([('id', 'in', self.ids), d]))
continue
if key.endswith('.id'):
key = key[:-3]
if key == 'id':
key = ''
# determine the field with the final type for values
field = None
if key:
model = self.browse()
for fname in key.split('.'):
field = model._fields[fname]
model = model[fname]
if comparator in ('like', 'ilike', '=like', '=ilike', 'not ilike', 'not like'):
value_esc = value.replace('_', '?').replace('%', '*').replace('[', '?')
records_ids = OrderedSet()
for rec in self:
data = rec.mapped(key)
if isinstance(data, BaseModel):
v = value
if (isinstance(value, list) or isinstance(value, tuple)) and len(value):
v = value[0]
if isinstance(v, str):
data = data.mapped('display_name')
else:
data = data and data.ids or [False]
elif field and field.type in ('date', 'datetime'):
# convert all date and datetime values to datetime
normalize = Datetime.to_datetime
if isinstance(value, (list, tuple)):
value = [normalize(v) for v in value]
else:
value = normalize(value)
data = [normalize(d) for d in data]
if comparator in ('in', 'not in'):
if not (isinstance(value, list) or isinstance(value, tuple)):
value = [value]
if comparator == '=':
ok = value in data
elif comparator == 'in':
ok = any(map(lambda x: x in data, value))
elif comparator == '<':
ok = any(map(lambda x: x is not None and x < value, data))
elif comparator == '>':
ok = any(map(lambda x: x is not None and x > value, data))
elif comparator == '<=':
ok = any(map(lambda x: x is not None and x <= value, data))
elif comparator == '>=':
ok = any(map(lambda x: x is not None and x >= value, data))
elif comparator in ('!=', '<>'):
ok = value not in data
elif comparator == 'not in':
ok = all(map(lambda x: x not in data, value))
elif comparator == 'not ilike':
data = [(x or "") for x in data]
ok = all(map(lambda x: value.lower() not in x.lower(), data))
elif comparator == 'ilike':
data = [(x or "").lower() for x in data]
ok = bool(fnmatch.filter(data, '*'+(value_esc or '').lower()+'*'))
elif comparator == 'not like':
data = [(x or "") for x in data]
ok = all(map(lambda x: value not in x, data))
elif comparator == 'like':
data = [(x or "") for x in data]
ok = bool(fnmatch.filter(data, value and '*'+value_esc+'*'))
elif comparator == '=?':
ok = (value in data) or not value
                    elif comparator == '=like':
data = [(x or "") for x in data]
ok = bool(fnmatch.filter(data, value_esc))
                    elif comparator == '=ilike':
data = [(x or "").lower() for x in data]
ok = bool(fnmatch.filter(data, value and value_esc.lower()))
else:
raise ValueError
if ok:
records_ids.add(rec.id)
result.append(self.browse(records_ids))
while len(result)>1:
result.append(result.pop() & result.pop())
return result[0]
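    # Usage sketch (hedged): evaluate a domain in memory on records already in
    # hand, without issuing a new SQL query. Model and field names below are
    # illustrative only.
    #
    #     posted_unpaid = invoices.filtered_domain([
    #         ('state', '=', 'posted'),
    #         ('payment_state', '!=', 'paid'),
    #     ])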
def sorted(self, key=None, reverse=False):
"""Return the recordset ``self`` ordered by ``key``.
:param key: either a function of one argument that returns a
comparison key for each record, or a field name, or ``None``, in
which case records are ordered according the default model's order
:type key: callable or str or None
:param bool reverse: if ``True``, return the result in reverse order
.. code-block:: python3
# sort records by name
records.sorted(key=lambda r: r.name)
"""
if key is None:
recs = self.search([('id', 'in', self.ids)])
return self.browse(reversed(recs._ids)) if reverse else recs
if isinstance(key, str):
key = itemgetter(key)
return self.browse(item.id for item in sorted(self, key=key, reverse=reverse))
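    # Usage sketch (hedged): a plain field name may be given instead of a
    # callable; 'name' below is illustrative.
    #
    #     partners.sorted('name')                # ascending by 'name'
    #     partners.sorted('name', reverse=True)  # descending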
def update(self, values):
""" Update the records in ``self`` with ``values``. """
for record in self:
for name, value in values.items():
record[name] = value
@api.model
def flush(self, fnames=None, records=None):
""" Process all the pending computations (on all models), and flush all
the pending updates to the database.
:param fnames (list<str>): list of field names to flush. If given,
limit the processing to the given fields of the current model.
:param records (Model): if given (together with ``fnames``), limit the
processing to the given records.
"""
def process(model, id_vals):
# group record ids by vals, to update in batch when possible
updates = defaultdict(list)
for rid, vals in id_vals.items():
updates[frozendict(vals)].append(rid)
for vals, ids in updates.items():
recs = model.browse(ids)
try:
recs._write(vals)
except MissingError:
recs.exists()._write(vals)
if fnames is None:
# flush everything
self.recompute()
while self.env.all.towrite:
model_name, id_vals = self.env.all.towrite.popitem()
process(self.env[model_name], id_vals)
else:
# flush self's model if any of the fields must be flushed
self.recompute(fnames, records=records)
# check whether any of 'records' must be flushed
if records is not None:
fnames = set(fnames)
towrite = self.env.all.towrite.get(self._name)
if not towrite or all(
fnames.isdisjoint(towrite.get(record.id, ()))
for record in records
):
return
# DLE P76: test_onchange_one2many_with_domain_on_related_field
# ```
# email.important = True
# self.assertIn(email, discussion.important_emails)
# ```
# When a search on a field coming from a related occurs (the domain
# on discussion.important_emails field), make sure the related field
# is flushed
model_fields = {}
for fname in fnames:
field = self._fields[fname]
model_fields.setdefault(field.model_name, []).append(field)
if field.related_field:
model_fields.setdefault(field.related_field.model_name, []).append(field.related_field)
for model_name, fields in model_fields.items():
if any(
field.name in vals
for vals in self.env.all.towrite.get(model_name, {}).values()
for field in fields
):
id_vals = self.env.all.towrite.pop(model_name)
process(self.env[model_name], id_vals)
# missing for one2many fields, flush their inverse
for fname in fnames:
field = self._fields[fname]
if field.type == 'one2many' and field.inverse_name:
self.env[field.comodel_name].flush([field.inverse_name])
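    # Usage sketch (hedged): push pending ORM writes to the database before
    # running raw SQL that must observe them; 'credit_limit' is an illustrative
    # field name.
    #
    #     partner.credit_limit = 1000.0
    #     partner.flush(['credit_limit'], records=partner)
    #     self.env.cr.execute("SELECT credit_limit FROM res_partner WHERE id = %s", [partner.id])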
#
# New records - represent records that do not exist in the database yet;
# they are used to perform onchanges.
#
@api.model
def new(self, values={}, origin=None, ref=None):
""" new([values], [origin], [ref]) -> record
Return a new record instance attached to the current environment and
        initialized with the provided ``values``. The record is *not* created
in database, it only exists in memory.
One can pass an ``origin`` record, which is the actual record behind the
result. It is retrieved as ``record._origin``. Two new records with the
same origin record are considered equal.
One can also pass a ``ref`` value to identify the record among other new
records. The reference is encapsulated in the ``id`` of the record.
"""
if origin is not None:
origin = origin.id
record = self.browse([NewId(origin, ref)])
record._update_cache(values, validate=False)
return record
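    # Usage sketch (hedged): build an in-memory record, e.g. to simulate a form
    # before saving; nothing is written to the database.
    #
    #     draft = self.env['res.partner'].new({'name': 'Draft partner'})
    #     draft.name          # reads from the cache only
    #     draft._origin       # empty recordset, since no origin was given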
@property
def _origin(self):
""" Return the actual records corresponding to ``self``. """
ids = tuple(origin_ids(self._ids))
prefetch_ids = IterableGenerator(origin_ids, self._prefetch_ids)
return self._browse(self.env, ids, prefetch_ids)
#
# "Dunder" methods
#
def __bool__(self):
""" Test whether ``self`` is nonempty. """
return bool(getattr(self, '_ids', True))
__nonzero__ = __bool__
def __len__(self):
""" Return the size of ``self``. """
return len(self._ids)
def __iter__(self):
""" Return an iterator over ``self``. """
if len(self._ids) > PREFETCH_MAX and self._prefetch_ids is self._ids:
for ids in self.env.cr.split_for_in_conditions(self._ids):
for id_ in ids:
yield self._browse(self.env, (id_,), ids)
else:
for id in self._ids:
yield self._browse(self.env, (id,), self._prefetch_ids)
def __contains__(self, item):
""" Test whether ``item`` (record or field name) is an element of ``self``.
In the first case, the test is fully equivalent to::
any(item == record for record in self)
"""
if isinstance(item, BaseModel) and self._name == item._name:
return len(item) == 1 and item.id in self._ids
elif isinstance(item, str):
return item in self._fields
elif isinstance(item, BaseModel):
raise TypeError(f"cannot compare different models: '{self._name}()' and '{item._name}()'")
else:
raise TypeError(f"unsupported operand type(s) for \"in\": '{self._name}()' and '{type(item)}'")
def __add__(self, other):
""" Return the concatenation of two recordsets. """
return self.concat(other)
def concat(self, *args):
""" Return the concatenation of ``self`` with all the arguments (in
linear time complexity).
"""
ids = list(self._ids)
for arg in args:
if isinstance(arg, BaseModel) and arg._name == self._name:
ids.extend(arg._ids)
elif isinstance(arg, BaseModel):
raise TypeError(f"cannot concat different models: '{self._name}()' and '{arg._name}()'")
else:
raise TypeError(f"unsupported operand type(s) for \"concat\": '{self._name}()' and '{type(arg)}'")
return self.browse(ids)
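    # Usage sketch (hedged): unlike repeated `+`, concat() builds a single
    # result without intermediate recordsets, keeping duplicates and order.
    #
    #     all_lines = lines_a.concat(lines_b, lines_c)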
def __sub__(self, other):
""" Return the recordset of all the records in ``self`` that are not in
``other``. Note that recordset order is preserved.
"""
if isinstance(other, BaseModel) and self._name == other._name:
other_ids = set(other._ids)
elif isinstance(other, BaseModel):
raise TypeError(f"cannot substract different models: '{self._name}()' and '{other._name}()'")
else:
raise TypeError(f"unsupported operand type(s) for \"-\": '{self._name}()' and '{type(other)}'")
return self.browse([id for id in self._ids if id not in other_ids])
def __and__(self, other):
""" Return the intersection of two recordsets.
Note that first occurrence order is preserved.
"""
if isinstance(other, BaseModel) and self._name == other._name:
other_ids = set(other._ids)
elif isinstance(other, BaseModel):
raise TypeError(f"cannot add different models: '{self._name}()' and '{other._name}()'")
else:
raise TypeError(f"unsupported operand type(s) for \"+\": '{self._name}()' and '{type(other)}'")
return self.browse(OrderedSet(id for id in self._ids if id in other_ids))
def __or__(self, other):
""" Return the union of two recordsets.
Note that first occurrence order is preserved.
"""
return self.union(other)
def union(self, *args):
""" Return the union of ``self`` with all the arguments (in linear time
complexity, with first occurrence order preserved).
"""
ids = list(self._ids)
for arg in args:
if isinstance(arg, BaseModel) and self._name == arg._name:
ids.extend(arg._ids)
elif isinstance(arg, BaseModel):
raise TypeError(f"cannot union different models: '{self._name}()' and '{arg._name}()'")
else:
raise TypeError(f"unsupported operand type(s) for \"union\": '{self._name}()' and '{type(arg)}'")
return self.browse(OrderedSet(ids))
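    # Usage sketch (hedged) of the set-like operators defined above; the
    # recordset names are illustrative.
    #
    #     everyone  = employees | managers   # union, first-occurrence order kept
    #     both      = employees & managers   # intersection
    #     only_emps = employees - managers   # difference, order of `employees` kept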
def __eq__(self, other):
""" Test whether two recordsets are equivalent (up to reordering). """
if not isinstance(other, BaseModel):
if other:
filename, lineno = frame_codeinfo(currentframe(), 1)
_logger.warning("unsupported operand type(s) for \"==\": '%s()' == '%r' (%s:%s)",
self._name, other, filename, lineno)
return NotImplemented
return self._name == other._name and set(self._ids) == set(other._ids)
def __lt__(self, other):
if not isinstance(other, BaseModel) or self._name != other._name:
return NotImplemented
return set(self._ids) < set(other._ids)
def __le__(self, other):
if not isinstance(other, BaseModel) or self._name != other._name:
return NotImplemented
# these are much cheaper checks than a proper subset check, so
# optimise for checking if a null or singleton are subsets of a
# recordset
if not self or self in other:
return True
return set(self._ids) <= set(other._ids)
def __gt__(self, other):
if not isinstance(other, BaseModel) or self._name != other._name:
return NotImplemented
return set(self._ids) > set(other._ids)
def __ge__(self, other):
if not isinstance(other, BaseModel) or self._name != other._name:
return NotImplemented
if not other or other in self:
return True
return set(self._ids) >= set(other._ids)
def __int__(self):
return self.id or 0
def __repr__(self):
return "%s%r" % (self._name, getattr(self, '_ids', ""))
def __hash__(self):
if hasattr(self, '_ids'):
return hash((self._name, frozenset(self._ids)))
else:
return hash(self._name)
def __getitem__(self, key):
""" If ``key`` is an integer or a slice, return the corresponding record
selection as an instance (attached to ``self.env``).
Otherwise read the field ``key`` of the first record in ``self``.
Examples::
inst = model.search(dom) # inst is a recordset
r4 = inst[3] # fourth record in inst
rs = inst[10:20] # subset of inst
            nm = rs['name'] # name of first record in rs
"""
if isinstance(key, str):
# important: one must call the field's getter
return self._fields[key].__get__(self, type(self))
elif isinstance(key, slice):
return self.browse(self._ids[key])
else:
return self.browse((self._ids[key],))
def __setitem__(self, key, value):
""" Assign the field ``key`` to ``value`` in record ``self``. """
# important: one must call the field's setter
return self._fields[key].__set__(self, value)
#
# Cache and recomputation management
#
@property
def _cache(self):
""" Return the cache of ``self``, mapping field names to values. """
return RecordCache(self)
def _in_cache_without(self, field, limit=PREFETCH_MAX):
""" Return records to prefetch that have no value in cache for ``field``
(:class:`Field` instance), including ``self``.
Return at most ``limit`` records.
"""
ids = expand_ids(self.id, self._prefetch_ids)
ids = self.env.cache.get_missing_ids(self.browse(ids), field)
if limit:
ids = itertools.islice(ids, limit)
# Those records are aimed at being either fetched, or computed. But the
# method '_fetch_field' is not correct with new records: it considers
# them as forbidden records, and clears their cache! On the other hand,
# compute methods are not invoked with a mix of real and new records for
# the sake of code simplicity.
return self.browse(ids)
@api.model
def refresh(self):
""" Clear the records cache.
.. deprecated:: 8.0
The record cache is automatically invalidated.
"""
self.invalidate_cache()
@api.model
def invalidate_cache(self, fnames=None, ids=None):
""" Invalidate the record caches after some records have been modified.
If both ``fnames`` and ``ids`` are ``None``, the whole cache is cleared.
:param fnames: the list of modified fields, or ``None`` for all fields
:param ids: the list of modified record ids, or ``None`` for all
"""
if fnames is None:
if ids is None:
return self.env.cache.invalidate()
fields = list(self._fields.values())
else:
fields = [self._fields[n] for n in fnames]
# invalidate fields and inverse fields, too
spec = [(f, ids) for f in fields] + \
[(invf, None) for f in fields for invf in self.pool.field_inverses[f]]
self.env.cache.invalidate(spec)
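    # Usage sketch (hedged): after bypassing the ORM with raw SQL, drop the
    # stale cached values so the next read refetches from the database.
    #
    #     self.env.cr.execute("UPDATE res_partner SET name = 'X' WHERE id = %s", [pid])
    #     self.env['res.partner'].invalidate_cache(fnames=['name'], ids=[pid])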
def modified(self, fnames, create=False, before=False):
""" Notify that fields will be or have been modified on ``self``. This
invalidates the cache where necessary, and prepares the recomputation of
dependent stored fields.
:param fnames: iterable of field names modified on records ``self``
:param create: whether called in the context of record creation
:param before: whether called before modifying records ``self``
"""
if not self or not fnames:
return
# The triggers of a field F is a tree that contains the fields that
# depend on F, together with the fields to inverse to find out which
# records to recompute.
#
# For instance, assume that G depends on F, H depends on X.F, I depends
# on W.X.F, and J depends on Y.F. The triggers of F will be the tree:
#
# [G]
# X/ \Y
# [H] [J]
# W/
# [I]
#
# This tree provides perfect support for the trigger mechanism:
        # when F is modified on records,
# - mark G to recompute on records,
# - mark H to recompute on inverse(X, records),
# - mark I to recompute on inverse(W, inverse(X, records)),
# - mark J to recompute on inverse(Y, records).
if len(fnames) == 1:
tree = self.pool.field_triggers.get(self._fields[next(iter(fnames))])
else:
# merge dependency trees to evaluate all triggers at once
tree = {}
for fname in fnames:
node = self.pool.field_triggers.get(self._fields[fname])
if node:
trigger_tree_merge(tree, node)
if tree:
# determine what to compute (through an iterator)
tocompute = self.sudo().with_context(active_test=False)._modified_triggers(tree, create)
# When called after modification, one should traverse backwards
# dependencies by taking into account all fields already known to be
            # recomputed. In that case, we mark fields to compute as soon as
# possible.
#
# When called before modification, one should mark fields to compute
# after having inversed all dependencies. This is because we
# determine what currently depends on self, and it should not be
# recomputed before the modification!
if before:
tocompute = list(tocompute)
# process what to compute
for field, records, create in tocompute:
records -= self.env.protected(field)
if not records:
continue
if field.compute and field.store:
if field.recursive:
recursively_marked = self.env.not_to_compute(field, records)
self.env.add_to_compute(field, records)
else:
                # Don't force the recomputation of compute fields which are
# not stored as this is not really necessary.
if field.recursive:
recursively_marked = records & self.env.cache.get_records(records, field)
self.env.cache.invalidate([(field, records._ids)])
# recursively trigger recomputation of field's dependents
if field.recursive:
recursively_marked.modified([field.name], create)
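    # Illustrative sketch (hedged) of the trigger tree described above: assume
    # a stored computed field 'total' depending on 'line_ids.amount' (both
    # names are hypothetical).
    #
    #     line.write({'amount': 10.0})   # write() ends up calling modified(['amount'])
    #     # -> 'total' is marked to recompute on the line's parent record
    #     order.total                    # accessing it triggers the recomputation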
def _modified_triggers(self, tree, create=False):
""" Return an iterator traversing a tree of field triggers on ``self``,
traversing backwards field dependencies along the way, and yielding
tuple ``(field, records, created)`` to recompute.
"""
if not self:
return
# first yield what to compute
for field in tree.get(None, ()):
yield field, self, create
# then traverse dependencies backwards, and proceed recursively
for key, val in tree.items():
if key is None:
continue
elif create and key.type in ('many2one', 'many2one_reference'):
# upon creation, no other record has a reference to self
continue
else:
# val is another tree of dependencies
model = self.env[key.model_name]
for invf in model.pool.field_inverses[key]:
# use an inverse of field without domain
if not (invf.type in ('one2many', 'many2many') and invf.domain):
if invf.type == 'many2one_reference':
rec_ids = set()
for rec in self:
try:
if rec[invf.model_field] == key.model_name:
rec_ids.add(rec[invf.name])
except MissingError:
continue
records = model.browse(rec_ids)
else:
try:
records = self[invf.name]
except MissingError:
records = self.exists()[invf.name]
# TODO: find a better fix
if key.model_name == records._name:
if not any(self._ids):
# if self are new, records should be new as well
records = records.browse(it and NewId(it) for it in records._ids)
break
else:
new_records = self.filtered(lambda r: not r.id)
real_records = self - new_records
records = model.browse()
if real_records:
records |= model.search([(key.name, 'in', real_records.ids)], order='id')
if new_records:
cache_records = self.env.cache.get_records(model, key)
records |= cache_records.filtered(lambda r: set(r[key.name]._ids) & set(self._ids))
yield from records._modified_triggers(val)
@api.model
def recompute(self, fnames=None, records=None):
""" Recompute all function fields (or the given ``fnames`` if present).
The fields and records to recompute have been determined by method
:meth:`modified`.
"""
def process(field):
recs = self.env.records_to_compute(field)
if not recs:
return
if field.compute and field.store:
# do not force recomputation on new records; those will be
# recomputed by accessing the field on the records
recs = recs.filtered('id')
try:
field.recompute(recs)
except MissingError:
existing = recs.exists()
field.recompute(existing)
# mark the field as computed on missing records, otherwise
# they remain forever in the todo list, and lead to an
# infinite loop...
for f in recs.pool.field_computed[field]:
self.env.remove_to_compute(f, recs - existing)
else:
self.env.cache.invalidate([(field, recs._ids)])
self.env.remove_to_compute(field, recs)
if fnames is None:
# recompute everything
for field in list(self.env.fields_to_compute()):
process(field)
else:
fields = [self._fields[fname] for fname in fnames]
# check whether any 'records' must be computed
if records is not None and not any(
records & self.env.records_to_compute(field)
for field in fields
):
return
# recompute the given fields on self's model
for field in fields:
process(field)
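    # Usage sketch (hedged): process pending computations for one field only;
    # 'total' is an illustrative stored computed field previously marked by
    # modified().
    #
    #     records.recompute(['total'], records=records)
    #     records.recompute()            # or process everything that is pending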
#
# Generic onchange method
#
def _dependent_fields(self, field):
""" Return an iterator on the fields that depend on ``field``. """
def traverse(node):
for key, val in node.items():
if key is None:
yield from val
else:
yield from traverse(val)
return traverse(self.pool.field_triggers.get(field, {}))
def _has_onchange(self, field, other_fields):
""" Return whether ``field`` should trigger an onchange event in the
presence of ``other_fields``.
"""
return (field.name in self._onchange_methods) or any(
dep in other_fields for dep in self._dependent_fields(field.base_field)
)
@api.model
def _onchange_spec(self, view_info=None):
""" Return the onchange spec from a view description; if not given, the
result of ``self.fields_view_get()`` is used.
"""
result = {}
# for traversing the XML arch and populating result
def process(node, info, prefix):
if node.tag == 'field':
name = node.attrib['name']
names = "%s.%s" % (prefix, name) if prefix else name
if not result.get(names):
result[names] = node.attrib.get('on_change')
# traverse the subviews included in relational fields
for subinfo in info['fields'][name].get('views', {}).values():
process(etree.fromstring(subinfo['arch']), subinfo, names)
else:
for child in node:
process(child, info, prefix)
if view_info is None:
view_info = self.fields_view_get()
process(etree.fromstring(view_info['arch']), view_info, '')
return result
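    # Illustrative result (hedged): for a form view containing
    # <field name="partner_id" on_change="1"/> and a one2many 'line_ids' whose
    # subview contains <field name="product_id" on_change="1"/>, the returned
    # spec would look like
    #
    #     {'partner_id': '1', 'line_ids.product_id': '1', ...}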
def _onchange_eval(self, field_name, onchange, result):
""" Apply onchange method(s) for field ``field_name`` with spec ``onchange``
on record ``self``. Value assignments are applied on ``self``, while
domain and warning messages are put in dictionary ``result``.
"""
onchange = onchange.strip()
def process(res):
if not res:
return
if res.get('value'):
res['value'].pop('id', None)
self.update({key: val for key, val in res['value'].items() if key in self._fields})
if res.get('domain'):
_logger.warning(
"onchange method %s returned a domain, this is deprecated",
method.__qualname__
)
result.setdefault('domain', {}).update(res['domain'])
if res.get('warning'):
result['warnings'].add((
res['warning'].get('title') or _("Warning"),
res['warning'].get('message') or "",
res['warning'].get('type') or "",
))
if onchange in ("1", "true"):
for method in self._onchange_methods.get(field_name, ()):
method_res = method(self)
process(method_res)
return
def onchange(self, values, field_name, field_onchange):
""" Perform an onchange on the given field.
:param values: dictionary mapping field names to values, giving the
current state of modification
:param field_name: name of the modified field, or list of field
names (in view order), or False
:param field_onchange: dictionary mapping field names to their
on_change attribute
When ``field_name`` is falsy, the method first adds default values
to ``values``, computes the remaining fields, applies onchange
methods to them, and return all the fields in ``field_onchange``.
"""
# this is for tests using `Form`
self.flush()
env = self.env
if isinstance(field_name, list):
names = field_name
elif field_name:
names = [field_name]
else:
names = []
first_call = not names
if any(name not in self._fields for name in names):
return {}
def PrefixTree(model, dotnames):
""" Return a prefix tree for sequences of field names. """
if not dotnames:
return {}
# group dotnames by prefix
suffixes = defaultdict(list)
for dotname in dotnames:
# name, *names = dotname.split('.', 1)
names = dotname.split('.', 1)
name = names.pop(0)
suffixes[name].extend(names)
# fill in prefix tree in fields order
tree = OrderedDict()
for name, field in model._fields.items():
if name in suffixes:
tree[name] = subtree = PrefixTree(model[name], suffixes[name])
if subtree and field.type == 'one2many':
subtree.pop(field.inverse_name, None)
return tree
class Snapshot(dict):
""" A dict with the values of a record, following a prefix tree. """
__slots__ = ()
def __init__(self, record, tree, fetch=True):
# put record in dict to include it when comparing snapshots
super(Snapshot, self).__init__({'<record>': record, '<tree>': tree})
if fetch:
for name in tree:
self.fetch(name)
def fetch(self, name):
""" Set the value of field ``name`` from the record's value. """
record = self['<record>']
tree = self['<tree>']
if record._fields[name].type in ('one2many', 'many2many'):
# x2many fields are serialized as a list of line snapshots
self[name] = [Snapshot(line, tree[name]) for line in record[name]]
else:
self[name] = record[name]
def has_changed(self, name):
""" Return whether a field on record has changed. """
if name not in self:
return True
record = self['<record>']
subnames = self['<tree>'][name]
if record._fields[name].type not in ('one2many', 'many2many'):
return self[name] != record[name]
return (
len(self[name]) != len(record[name])
or (
set(line_snapshot["<record>"].id for line_snapshot in self[name])
!= set(record[name]._ids)
)
or any(
line_snapshot.has_changed(subname)
for line_snapshot in self[name]
for subname in subnames
)
)
def diff(self, other, force=False):
""" Return the values in ``self`` that differ from ``other``.
Requires record cache invalidation for correct output!
"""
record = self['<record>']
result = {}
for name, subnames in self['<tree>'].items():
if name == 'id':
continue
if not force and other.get(name) == self[name]:
continue
field = record._fields[name]
if field.type not in ('one2many', 'many2many'):
result[name] = field.convert_to_onchange(self[name], record, {})
else:
# x2many fields: serialize value as commands
result[name] = commands = [Command.clear()]
# The purpose of the following line is to enable the prefetching.
# In the loop below, line._prefetch_ids actually depends on the
# value of record[name] in cache (see prefetch_ids on x2many
# fields). But the cache has been invalidated before calling
# diff(), therefore evaluating line._prefetch_ids with an empty
# cache simply returns nothing, which discards the prefetching
# optimization!
record._cache[name] = tuple(
line_snapshot['<record>'].id for line_snapshot in self[name]
)
for line_snapshot in self[name]:
line = line_snapshot['<record>']
line = line._origin or line
if not line.id:
# new line: send diff from scratch
line_diff = line_snapshot.diff({})
commands.append((Command.CREATE, line.id.ref or 0, line_diff))
else:
# existing line: check diff from database
# (requires a clean record cache!)
line_diff = line_snapshot.diff(Snapshot(line, subnames))
if line_diff:
# send all fields because the web client
# might need them to evaluate modifiers
line_diff = line_snapshot.diff({})
commands.append(Command.update(line.id, line_diff))
else:
commands.append(Command.link(line.id))
return result
nametree = PrefixTree(self.browse(), field_onchange)
if first_call:
names = [name for name in values if name != 'id']
missing_names = [name for name in nametree if name not in values]
defaults = self.default_get(missing_names)
for name in missing_names:
values[name] = defaults.get(name, False)
if name in defaults:
names.append(name)
# prefetch x2many lines: this speeds up the initial snapshot by avoiding
# to compute fields on new records as much as possible, as that can be
# costly and is not necessary at all
for name, subnames in nametree.items():
if subnames and values.get(name):
# retrieve all line ids in commands
line_ids = set()
for cmd in values[name]:
if cmd[0] in (Command.UPDATE, Command.LINK):
line_ids.add(cmd[1])
elif cmd[0] == Command.SET:
line_ids.update(cmd[2])
# prefetch stored fields on lines
lines = self[name].browse(line_ids)
fnames = [subname
for subname in subnames
if lines._fields[subname].base_field.store]
lines._read(fnames)
# copy the cache of lines to their corresponding new records;
# this avoids computing computed stored fields on new_lines
new_lines = lines.browse(map(NewId, line_ids))
cache = self.env.cache
for fname in fnames:
field = lines._fields[fname]
cache.update(new_lines, field, [
field.convert_to_cache(value, new_line, validate=False)
for value, new_line in zip(cache.get_values(lines, field), new_lines)
])
# Isolate changed values, to handle inconsistent data sent from the
# client side: when a form view contains two one2many fields that
# overlap, the lines that appear in both fields may be sent with
# different data. Consider, for instance:
#
# foo_ids: [line with value=1, ...]
# bar_ids: [line with value=1, ...]
#
# If value=2 is set on 'line' in 'bar_ids', the client sends
#
# foo_ids: [line with value=1, ...]
# bar_ids: [line with value=2, ...]
#
# The idea is to put 'foo_ids' in cache first, so that the snapshot
# contains value=1 for line in 'foo_ids'. The snapshot is then updated
# with the value of `bar_ids`, which will contain value=2 on line.
#
# The issue also occurs with other fields. For instance, an onchange on
# a move line has a value for the field 'move_id' that contains the
# values of the move, among which the one2many that contains the line
# itself, with old values!
#
changed_values = {name: values[name] for name in names}
# set changed values to null in initial_values; not setting them
# triggers default_get() on the new record when creating snapshot0
initial_values = dict(values, **dict.fromkeys(names, False))
# do not force delegate fields to False
for parent_name in self._inherits.values():
if not initial_values.get(parent_name, True):
initial_values.pop(parent_name)
# create a new record with values
record = self.new(initial_values, origin=self)
# make parent records match with the form values; this ensures that
# computed fields on parent records have all their dependencies at
# their expected value
for name in initial_values:
field = self._fields.get(name)
if field and field.inherited:
parent_name, name = field.related.split('.', 1)
record[parent_name]._update_cache({name: record[name]})
# make a snapshot based on the initial values of record
snapshot0 = Snapshot(record, nametree, fetch=(not first_call))
# store changed values in cache; also trigger recomputations based on
# subfields (e.g., line.a has been modified, line.b is computed stored
# and depends on line.a, but line.b is not in the form view)
record._update_cache(changed_values, validate=False)
# update snapshot0 with changed values
for name in names:
snapshot0.fetch(name)
# Determine which field(s) should be triggered an onchange. On the first
# call, 'names' only contains fields with a default. If 'self' is a new
# line in a one2many field, 'names' also contains the one2many's inverse
# field, and that field may not be in nametree.
todo = list(unique(itertools.chain(names, nametree))) if first_call else list(names)
done = set()
# mark fields to do as modified to trigger recomputations
protected = [self._fields[name] for name in names]
with self.env.protecting(protected, record):
record.modified(todo)
for name in todo:
field = self._fields[name]
if field.inherited:
# modifying an inherited field should modify the parent
# record accordingly; because we don't actually assign the
# modified field on the record, the modification on the
# parent record has to be done explicitly
parent = record[field.related.split('.')[0]]
parent[name] = record[name]
result = {'warnings': OrderedSet()}
# process names in order
while todo:
# apply field-specific onchange methods
for name in todo:
if field_onchange.get(name):
record._onchange_eval(name, field_onchange[name], result)
done.add(name)
# determine which fields to process for the next pass
todo = [
name
for name in nametree
if name not in done and snapshot0.has_changed(name)
]
if not env.context.get('recursive_onchanges', True):
todo = []
# make the snapshot with the final values of record
snapshot1 = Snapshot(record, nametree)
# determine values that have changed by comparing snapshots
self.invalidate_cache()
result['value'] = snapshot1.diff(snapshot0, force=first_call)
# format warnings
warnings = result.pop('warnings')
if len(warnings) == 1:
title, message, type = warnings.pop()
if not type:
type = 'dialog'
result['warning'] = dict(title=title, message=message, type=type)
elif len(warnings) > 1:
# concatenate warning titles and messages
title = _("Warnings")
message = '\n\n'.join([warn_title + '\n\n' + warn_message for warn_title, warn_message, warn_type in warnings])
result['warning'] = dict(title=title, message=message, type='dialog')
return result
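    # Usage sketch (hedged): the web client typically calls this as
    #
    #     record.onchange(
    #         values={'partner_id': 7, 'line_ids': [], ...},  # current form state
    #         field_name='partner_id',                        # field just edited
    #         field_onchange=spec,                            # from _onchange_spec()
    #     )
    #
    # and merges result['value'] back into the form; 'warning' may also be set.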
def _get_placeholder_filename(self, field):
""" Returns the filename of the placeholder to use,
set on web/static/img by default, or the
complete path to access it (eg: module/path/to/image.png).
If a falsy value is returned, "ir.http"._placeholder() will use
the default placeholder 'web/static/img/placeholder.png'.
"""
return False
def _populate_factories(self):
""" Generates a factory for the different fields of the model.
``factory`` is a generator of values (dict of field values).
Factory skeleton::
def generator(iterator, field_name, model_name):
for counter, values in enumerate(iterator):
# values.update(dict())
yield values
See :mod:`odoo.tools.populate` for population tools and applications.
:returns: list of pairs(field_name, factory) where `factory` is a generator function.
:rtype: list(tuple(str, generator))
.. note::
It is the responsibility of the generator to handle the field_name correctly.
The generator could generate values for multiple fields together. In this case,
            the field_name should rather be a "field_group" (it should begin with an "_"), covering
the different fields updated by the generator (e.g. "_address" for a generator
updating multiple address fields).
"""
return []
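    # Hedged example of a concrete override, using helpers from
    # odoo.tools.populate; the field names are illustrative.
    #
    #     def _populate_factories(self):
    #         return [
    #             ('name', populate.constant('Record_{counter}')),
    #             ('active', populate.randomize([True, False], [0.9, 0.1])),
    #         ]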
@property
def _populate_sizes(self):
""" Return a dict mapping symbolic sizes (``'small'``, ``'medium'``, ``'large'``) to integers,
giving the minimal number of records that :meth:`_populate` should create.
The default population sizes are:
* ``small`` : 10
* ``medium`` : 100
* ``large`` : 1000
"""
return {
'small': 10, # minimal representative set
'medium': 100, # average database load
            'large': 1000, # maximal database load
}
@property
def _populate_dependencies(self):
""" Return the list of models which have to be populated before the current one.
:rtype: list
"""
return []
def _populate(self, size):
""" Create records to populate this model.
:param str size: symbolic size for the number of records: ``'small'``, ``'medium'`` or ``'large'``
"""
batch_size = 1000
min_size = self._populate_sizes[size]
record_count = 0
create_values = []
complete = False
field_generators = self._populate_factories()
if not field_generators:
return self.browse() # maybe create an automatic generator?
records_batches = []
generator = populate.chain_factories(field_generators, self._name)
while record_count <= min_size or not complete:
values = next(generator)
complete = values.pop('__complete')
create_values.append(values)
record_count += 1
if len(create_values) >= batch_size:
_logger.info('Batch: %s/%s', record_count, min_size)
records_batches.append(self.create(create_values))
create_values = []
if create_values:
records_batches.append(self.create(create_values))
return self.concat(*records_batches)
collections.abc.Set.register(BaseModel)
# not exactly true as BaseModel doesn't have __reversed__, index or count
collections.abc.Sequence.register(BaseModel)
class RecordCache(MutableMapping):
""" A mapping from field names to values, to read and update the cache of a record. """
__slots__ = ['_record']
def __init__(self, record):
assert len(record) == 1, "Unexpected RecordCache(%s)" % record
self._record = record
def __contains__(self, name):
""" Return whether `record` has a cached value for field ``name``. """
field = self._record._fields[name]
return self._record.env.cache.contains(self._record, field)
def __getitem__(self, name):
""" Return the cached value of field ``name`` for `record`. """
field = self._record._fields[name]
return self._record.env.cache.get(self._record, field)
def __setitem__(self, name, value):
""" Assign the cached value of field ``name`` for ``record``. """
field = self._record._fields[name]
self._record.env.cache.set(self._record, field, value)
def __delitem__(self, name):
""" Remove the cached value of field ``name`` for ``record``. """
field = self._record._fields[name]
self._record.env.cache.remove(self._record, field)
def __iter__(self):
""" Iterate over the field names with a cached value. """
for field in self._record.env.cache.get_fields(self._record):
yield field.name
def __len__(self):
""" Return the number of fields with a cached value. """
return sum(1 for name in self)
AbstractModel = BaseModel
class Model(AbstractModel):
""" Main super-class for regular database-persisted Odoo models.
Odoo models are created by inheriting from this class::
class user(Model):
...
The system will later instantiate the class once per database (on
which the class' module is installed).
"""
_auto = True # automatically create database backend
_register = False # not visible in ORM registry, meant to be python-inherited only
_abstract = False # not abstract
_transient = False # not transient
class TransientModel(Model):
""" Model super-class for transient records, meant to be temporarily
persistent, and regularly vacuum-cleaned.
A TransientModel has a simplified access rights management, all users can
create new records, and may only access the records they created. The
superuser has unrestricted access to all TransientModel records.
"""
_auto = True # automatically create database backend
_register = False # not visible in ORM registry, meant to be python-inherited only
_abstract = False # not abstract
_transient = True # transient
@api.autovacuum
def _transient_vacuum(self):
"""Clean the transient records.
This unlinks old records from the transient model tables whenever the
"_transient_max_count" or "_max_age" conditions (if any) are reached.
Actual cleaning will happen only once every "_transient_check_time" calls.
        This means this method can be called very frequently (e.g. whenever
a new record is created).
Example with both max_hours and max_count active:
        Suppose max_hours = 0.2 (i.e. 12 minutes), max_count = 20, there are 55 rows in the
        table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
        5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
- age based vacuum will leave the 22 rows created/changed in the last 12 minutes
- count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
would immediately cause the maximum to be reached again.
- the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
"""
if self._transient_max_hours:
# Age-based expiration
self._transient_clean_rows_older_than(self._transient_max_hours * 60 * 60)
if self._transient_max_count:
# Count-based expiration
self._transient_clean_old_rows(self._transient_max_count)
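    # Hedged configuration sketch: a concrete wizard can tune the thresholds
    # used above with these class attributes (values are illustrative).
    #
    #     class MyWizard(models.TransientModel):
    #         _name = 'my.wizard'
    #         _transient_max_hours = 2.0   # age-based expiration
    #         _transient_max_count = 500   # count-based expiration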
def _transient_clean_old_rows(self, max_count):
# Check how many rows we have in the table
query = 'SELECT count(*) FROM "{}"'.format(self._table)
self._cr.execute(query)
[count] = self._cr.fetchone()
if count > max_count:
self._transient_clean_rows_older_than(300)
def _transient_clean_rows_older_than(self, seconds):
# Never delete rows used in last 5 minutes
seconds = max(seconds, 300)
query = """
SELECT id FROM "{}"
WHERE COALESCE(write_date, create_date, (now() AT TIME ZONE 'UTC'))::timestamp
< (now() AT TIME ZONE 'UTC') - interval %s
""".format(self._table)
self._cr.execute(query, ["%s seconds" % seconds])
ids = [x[0] for x in self._cr.fetchall()]
self.sudo().browse(ids).unlink()
def itemgetter_tuple(items):
""" Fixes itemgetter inconsistency (useful in some cases) of not returning
a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
"""
if len(items) == 0:
return lambda a: ()
if len(items) == 1:
return lambda gettable: (gettable[items[0]],)
return operator.itemgetter(*items)
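# Behaviour sketch (hedged) showing the difference with plain operator.itemgetter:
#
#     operator.itemgetter('a')({'a': 1})               # -> 1, not a tuple
#     itemgetter_tuple(['a'])({'a': 1})                # -> (1,)
#     itemgetter_tuple(['a', 'b'])({'a': 1, 'b': 2})   # -> (1, 2)
#     itemgetter_tuple([])({'a': 1})                   # -> ()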
def convert_pgerror_not_null(model, fields, info, e):
if e.diag.table_name != model._table:
return {'message': _(u"Missing required value for the field '%s'") % (e.diag.column_name)}
field_name = e.diag.column_name
field = fields[field_name]
message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
return {
'message': message,
'field': field_name,
}
def convert_pgerror_unique(model, fields, info, e):
# new cursor since we're probably in an error handler in a blown
# transaction which may not have been rollbacked/cleaned yet
with closing(model.env.registry.cursor()) as cr_tmp:
cr_tmp.execute("""
SELECT
conname AS "constraint name",
t.relname AS "table name",
ARRAY(
SELECT attname FROM pg_attribute
WHERE attrelid = conrelid
AND attnum = ANY(conkey)
) as "columns"
FROM pg_constraint
JOIN pg_class t ON t.oid = conrelid
WHERE conname = %s
""", [e.diag.constraint_name])
constraint, table, ufields = cr_tmp.fetchone() or (None, None, None)
# if the unique constraint is on an expression or on an other table
if not ufields or model._table != table:
return {'message': tools.ustr(e)}
# TODO: add stuff from e.diag.message_hint? provides details about the constraint & duplication values but may be localized...
if len(ufields) == 1:
field_name = ufields[0]
field = fields[field_name]
message = _(u"The value for the field '%s' already exists (this is probably '%s' in the current model).") % (field_name, field['string'])
return {
'message': message,
'field': field_name,
}
field_strings = [fields[fname]['string'] for fname in ufields]
message = _(u"The values for the fields '%s' already exist (they are probably '%s' in the current model).") % (', '.join(ufields), ', '.join(field_strings))
return {
'message': message,
# no field, unclear which one we should pick and they could be in any order
}
def convert_pgerror_constraint(model, fields, info, e):
sql_constraints = dict([(('%s_%s') % (e.diag.table_name, x[0]), x) for x in model._sql_constraints])
if e.diag.constraint_name in sql_constraints.keys():
return {'message': "'%s'" % sql_constraints[e.diag.constraint_name][2]}
return {'message': tools.ustr(e)}
PGERROR_TO_OE = defaultdict(
# shape of mapped converters
lambda: (lambda model, fvg, info, pgerror: {'message': tools.ustr(pgerror)}), {
'23502': convert_pgerror_not_null,
'23505': convert_pgerror_unique,
'23514': convert_pgerror_constraint,
})
def lazy_name_get(self):
""" Evaluate self.name_get() lazily. """
names = tools.lazy(lambda: dict(self.name_get()))
return [(rid, tools.lazy(operator.getitem, names, rid)) for rid in self.ids]
# keep those imports here to avoid dependency cycle errors
# pylint: disable=wrong-import-position
from . import fields
from .osv import expression
from .fields import Field, Datetime, Command
| 44.116862 | 300,877 |
177,441 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" High-level objects for fields. """
from collections import defaultdict
from datetime import date, datetime, time
from operator import attrgetter
from xmlrpc.client import MAXINT
import base64
import binascii
import enum
import itertools
import logging
import warnings
from markupsafe import Markup
import psycopg2
import pytz
from .tools import (
float_repr, float_round, float_compare, float_is_zero, html_sanitize, human_size,
pg_varchar, ustr, OrderedSet, pycompat, sql, date_utils, unique, IterableGenerator,
image_process, merge_sequences,
)
from .tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT
from .tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT
from .tools.translate import html_translate, _
from .tools.mimetypes import guess_mimetype
from odoo.exceptions import CacheMiss
DATE_LENGTH = len(date.today().strftime(DATE_FORMAT))
DATETIME_LENGTH = len(datetime.now().strftime(DATETIME_FORMAT))
# hacky-ish way to prevent access to a field through the ORM (except for sudo mode)
NO_ACCESS='.'
IR_MODELS = (
'ir.model', 'ir.model.data', 'ir.model.fields', 'ir.model.fields.selection',
'ir.model.relation', 'ir.model.constraint', 'ir.module.module',
)
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__[:-7] + '.schema')
Default = object() # default value for __init__() methods
def first(records):
""" Return the first record in ``records``, with the same prefetching. """
return next(iter(records)) if len(records) > 1 else records
def resolve_mro(model, name, predicate):
""" Return the list of successively overridden values of attribute ``name``
in mro order on ``model`` that satisfy ``predicate``. Model registry
classes are ignored.
"""
result = []
for cls in type(model).mro():
if not is_registry_class(cls):
value = cls.__dict__.get(name, Default)
if value is Default:
continue
if not predicate(value):
break
result.append(value)
return result
def determine(needle, records, *args):
""" Simple helper for calling a method given as a string or a function.
:param needle: callable or name of method to call on ``records``
:param BaseModel records: recordset to call ``needle`` on or with
:params args: additional arguments to pass to the determinant
:returns: the determined value if the determinant is a method name or callable
:raise TypeError: if ``records`` is not a recordset, or ``needle`` is not
a callable or valid method name
"""
if not isinstance(records, BaseModel):
raise TypeError("Determination requires a subject recordset")
if isinstance(needle, str):
needle = getattr(records, needle)
        # str.find() returns 0 (falsy) only when the name starts with '__',
        # so this rejects dunder names as determinants
        if needle.__name__.find('__'):
return needle(*args)
elif callable(needle):
if needle.__name__.find('__'):
return needle(records, *args)
raise TypeError("Determination requires a callable or method name")
class MetaField(type):
""" Metaclass for field classes. """
by_type = {}
def __init__(cls, name, bases, attrs):
super(MetaField, cls).__init__(name, bases, attrs)
if not hasattr(cls, 'type'):
return
if cls.type and cls.type not in MetaField.by_type:
MetaField.by_type[cls.type] = cls
# compute class attributes to avoid calling dir() on fields
cls.related_attrs = []
cls.description_attrs = []
for attr in dir(cls):
if attr.startswith('_related_'):
cls.related_attrs.append((attr[9:], attr))
elif attr.startswith('_description_'):
cls.description_attrs.append((attr[13:], attr))
_global_seq = iter(itertools.count())
class Field(MetaField('DummyField', (object,), {})):
"""The field descriptor contains the field definition, and manages accesses
and assignments of the corresponding field on records. The following
attributes may be provided when instantiating a field:
:param str string: the label of the field seen by users; if not
set, the ORM takes the field name in the class (capitalized).
:param str help: the tooltip of the field seen by users
:param invisible: whether the field is invisible (boolean, by default ``False``)
:param bool readonly: whether the field is readonly (default: ``False``)
This only has an impact on the UI. Any field assignation in code will work
(if the field is a stored field or an inversable one).
:param bool required: whether the value of the field is required (default: ``False``)
:param bool index: whether the field is indexed in database. Note: no effect
on non-stored and virtual fields. (default: ``False``)
:param default: the default value for the field; this is either a static
value, or a function taking a recordset and returning a value; use
``default=None`` to discard default values for the field
:type default: value or callable
:param dict states: a dictionary mapping state values to lists of UI attribute-value
pairs; possible attributes are: ``readonly``, ``required``, ``invisible``.
.. warning:: Any state-based condition requires the ``state`` field value to be
available on the client-side UI. This is typically done by including it in
the relevant views, possibly made invisible if not relevant for the
end-user.
:param str groups: comma-separated list of group xml ids (string); this
restricts the field access to the users of the given groups only
:param bool company_dependent: whether the field value is dependent of the current company;
The value isn't stored on the model table. It is registered as `ir.property`.
When the value of the company_dependent field is needed, an `ir.property`
is searched, linked to the current company (and current record if one property
exists).
If the value is changed on the record, it either modifies the existing property
for the current record (if one exists), or creates a new one for the current company
and res_id.
If the value is changed on the company side, it will impact all records on which
the value hasn't been changed.
:param bool copy: whether the field value should be copied when the record
is duplicated (default: ``True`` for normal fields, ``False`` for
``one2many`` and computed fields, including property fields and
related fields)
:param bool store: whether the field is stored in database
(default:``True``, ``False`` for computed fields)
:param str group_operator: aggregate function used by :meth:`~odoo.models.Model.read_group`
when grouping on this field.
Supported aggregate functions are:
* ``array_agg`` : values, including nulls, concatenated into an array
* ``count`` : number of rows
* ``count_distinct`` : number of distinct rows
* ``bool_and`` : true if all values are true, otherwise false
* ``bool_or`` : true if at least one value is true, otherwise false
* ``max`` : maximum value of all values
* ``min`` : minimum value of all values
* ``avg`` : the average (arithmetic mean) of all values
* ``sum`` : sum of all values
:param str group_expand: function used to expand read_group results when grouping on
the current field.
.. code-block:: python
@api.model
def _read_group_selection_field(self, values, domain, order):
return ['choice1', 'choice2', ...] # available selection choices.
@api.model
def _read_group_many2one_field(self, records, domain, order):
return records + self.search([custom_domain])
.. rubric:: Computed Fields
:param str compute: name of a method that computes the field
.. seealso:: :ref:`Advanced Fields/Compute fields <reference/fields/compute>`
:param bool compute_sudo: whether the field should be recomputed as superuser
to bypass access rights (by default ``True`` for stored fields, ``False``
for non stored fields)
:param bool recursive: whether the field has recursive dependencies (the field
``X`` has a dependency like ``parent_id.X``); declaring a field recursive
must be explicit to guarantee that recomputation is correct
:param str inverse: name of a method that inverses the field (optional)
:param str search: name of a method that implement search on the field (optional)
:param str related: sequence of field names
.. seealso:: :ref:`Advanced fields/Related fields <reference/fields/related>`
"""
type = None # type of the field (string)
relational = False # whether the field is a relational one
translate = False # whether the field is translated
column_type = None # database column type (ident, spec)
column_format = '%s' # placeholder for value in queries
column_cast_from = () # column types that may be cast to this
write_sequence = 0 # field ordering for write()
args = None # the parameters given to __init__()
_module = None # the field's module name
_modules = None # modules that define this field
_setup_done = True # whether the field is completely set up
_sequence = None # absolute ordering of the field
_base_fields = () # the fields defining self, in override order
_extra_keys = () # unknown attributes set on the field
_direct = False # whether self may be used directly (shared)
_toplevel = False # whether self is on the model's registry class
automatic = False # whether the field is automatically created ("magic" field)
inherited = False # whether the field is inherited (_inherits)
inherited_field = None # the corresponding inherited field
name = None # name of the field
model_name = None # name of the model of this field
comodel_name = None # name of the model of values (if relational)
store = True # whether the field is stored in database
index = False # whether the field is indexed in database
manual = False # whether the field is a custom field
copy = True # whether the field is copied over by BaseModel.copy()
_depends = None # collection of field dependencies
_depends_context = None # collection of context key dependencies
recursive = False # whether self depends on itself
compute = None # compute(recs) computes field on recs
compute_sudo = False # whether field should be recomputed as superuser
inverse = None # inverse(recs) inverses field on recs
search = None # search(recs, operator, value) searches on self
related = None # sequence of field names, for related fields
company_dependent = False # whether ``self`` is company-dependent (property field)
default = None # default(recs) returns the default value
string = None # field label
help = None # field tooltip
invisible = False # whether the field is invisible
readonly = False # whether the field is readonly
required = False # whether the field is required
states = None # set readonly and required depending on state
groups = None # csv list of group xml ids
change_default = False # whether the field may trigger a "user-onchange"
deprecated = None # whether the field is deprecated
related_field = None # corresponding related field
group_operator = None # operator for aggregating values
group_expand = None # name of method to expand groups in read_group()
prefetch = True # whether the field is prefetched
def __init__(self, string=Default, **kwargs):
kwargs['string'] = string
self._sequence = next(_global_seq)
self.args = {key: val for key, val in kwargs.items() if val is not Default}
def __str__(self):
if self.name is None:
return "<%s.%s>" % (__name__, type(self).__name__)
return "%s.%s" % (self.model_name, self.name)
def __repr__(self):
if self.name is None:
return "<%s.%s>" % (__name__, type(self).__name__)
return "%s.%s" % (self.model_name, self.name)
############################################################################
#
# Base field setup: things that do not depend on other models/fields
#
# The base field setup is done by field.__set_name__(), which determines the
# field's name, model name, module and its parameters.
#
# The dictionary field.args gives the parameters passed to the field's
# constructor. Most parameters have an attribute of the same name on the
# field. The parameters as attributes are assigned by the field setup.
#
# When several definition classes of the same model redefine a given field,
# the field occurrences are "merged" into one new field instantiated at
# runtime on the registry class of the model. The occurrences of the field
# are given to the new field as the parameter '_base_fields'; it is a list
# of fields in override order (or reverse MRO).
#
# In order to save memory, a field should avoid having field.args and/or
# many attributes when possible. We call "direct" a field that can be set
# up directly from its definition class. Direct fields are non-related
# fields defined on models, and can be shared across registries. We call
# "toplevel" a field that is put on the model's registry class, and is
# therefore specific to the registry.
#
    # Toplevel fields are set up once, and are no longer set up from scratch
# after that. Those fields can save memory by discarding field.args and
# field._base_fields once set up, because those are no longer necessary.
#
# Non-toplevel non-direct fields are the fields on definition classes that
# may not be shared. In other words, those fields are never used directly,
# and are always recreated as toplevel fields. On those fields, the base
# setup is useless, because only field.args is used for setting up other
# fields. We therefore skip the base setup for those fields. The only
# attributes of those fields are: '_sequence', 'args', 'model_name', 'name'
# and '_module', which makes their __dict__'s size minimal.
def __set_name__(self, owner, name):
""" Perform the base setup of a field.
:param owner: the owner class of the field (the model's definition or registry class)
:param name: the name of the field
"""
assert issubclass(owner, BaseModel)
self.model_name = owner._name
self.name = name
if is_definition_class(owner):
# only for fields on definition classes, not registry classes
self._module = owner._module
owner._field_definitions.append(self)
if not self.args.get('related'):
self._direct = True
if self._direct or self._toplevel:
self._setup_attrs(owner, name)
if self._toplevel:
# free memory, self.args and self._base_fields are no longer useful
self.__dict__.pop('args', None)
self.__dict__.pop('_base_fields', None)
#
# Setup field parameter attributes
#
def _get_attrs(self, model_class, name):
""" Return the field parameter attributes as a dictionary. """
# determine all inherited field attributes
attrs = {}
modules = []
for field in self.args.get('_base_fields', ()):
if not isinstance(self, type(field)):
# 'self' overrides 'field' and their types are not compatible;
# so we ignore all the parameters collected so far
attrs.clear()
modules.clear()
continue
attrs.update(field.args)
if field._module:
modules.append(field._module)
attrs.update(self.args)
if self._module:
modules.append(self._module)
attrs['args'] = self.args
attrs['model_name'] = model_class._name
attrs['name'] = name
attrs['_module'] = modules[-1] if modules else None
attrs['_modules'] = tuple(set(modules))
# initialize ``self`` with ``attrs``
if name == 'state':
# by default, `state` fields should be reset on copy
attrs['copy'] = attrs.get('copy', False)
if attrs.get('compute'):
# by default, computed fields are not stored, computed in superuser
# mode if stored, not copied (unless stored and explicitly not
# readonly), and readonly (unless inversible)
attrs['store'] = store = attrs.get('store', False)
attrs['compute_sudo'] = attrs.get('compute_sudo', store)
if not (attrs['store'] and not attrs.get('readonly', True)):
attrs['copy'] = attrs.get('copy', False)
attrs['readonly'] = attrs.get('readonly', not attrs.get('inverse'))
if attrs.get('related'):
# by default, related fields are not stored, computed in superuser
# mode, not copied and readonly
attrs['store'] = store = attrs.get('store', False)
attrs['compute_sudo'] = attrs.get('compute_sudo', attrs.get('related_sudo', True))
attrs['copy'] = attrs.get('copy', False)
attrs['readonly'] = attrs.get('readonly', True)
if attrs.get('company_dependent'):
# by default, company-dependent fields are not stored, not computed
# in superuser mode and not copied
attrs['store'] = False
attrs['compute_sudo'] = attrs.get('compute_sudo', False)
attrs['copy'] = attrs.get('copy', False)
attrs['default'] = attrs.get('default', self._default_company_dependent)
attrs['compute'] = self._compute_company_dependent
if not attrs.get('readonly'):
attrs['inverse'] = self._inverse_company_dependent
attrs['search'] = self._search_company_dependent
attrs['depends_context'] = attrs.get('depends_context', ()) + ('company',)
if attrs.get('translate'):
# by default, translatable fields are context-dependent
attrs['depends_context'] = attrs.get('depends_context', ()) + ('lang',)
# parameters 'depends' and 'depends_context' are stored in attributes
# '_depends' and '_depends_context', respectively
if 'depends' in attrs:
attrs['_depends'] = tuple(attrs.pop('depends'))
if 'depends_context' in attrs:
attrs['_depends_context'] = tuple(attrs.pop('depends_context'))
return attrs
def _setup_attrs(self, model_class, name):
""" Initialize the field parameter attributes. """
attrs = self._get_attrs(model_class, name)
# determine parameters that must be validated
extra_keys = [key for key in attrs if not hasattr(self, key)]
if extra_keys:
attrs['_extra_keys'] = extra_keys
self.__dict__.update(attrs)
# prefetch only stored, column, non-manual and non-deprecated fields
if not (self.store and self.column_type) or self.manual or self.deprecated:
self.prefetch = False
if not self.string and not self.related:
# related fields get their string from their parent field
self.string = (
name[:-4] if name.endswith('_ids') else
name[:-3] if name.endswith('_id') else name
).replace('_', ' ').title()
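# e.g. a field named 'partner_id' gets the default string 'Partner', and
# 'tag_ids' gets 'Tag' (suffixes '_id'/'_ids' stripped, then title-cased)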
# self.default must be either None or a callable
if self.default is not None and not callable(self.default):
value = self.default
self.default = lambda model: value
############################################################################
#
# Complete field setup: everything else
#
def prepare_setup(self):
self._setup_done = False
def setup(self, model):
""" Perform the complete setup of a field. """
if not self._setup_done:
# validate field params
for key in self._extra_keys:
if not model._valid_field_parameter(self, key):
_logger.warning(
"Field %s: unknown parameter %r, if this is an actual"
" parameter you may want to override the method"
" _valid_field_parameter on the relevant model in order to"
" allow it",
self, key
)
if self.related:
self.setup_related(model)
else:
self.setup_nonrelated(model)
self._setup_done = True
#
# Setup of non-related fields
#
def setup_nonrelated(self, model):
""" Determine the dependencies and inverse field(s) of ``self``. """
pass
def get_depends(self, model):
""" Return the field's dependencies and cache dependencies. """
if self._depends is not None:
# the parameter 'depends' has priority over 'depends' on compute
return self._depends, self._depends_context or ()
if self.related:
if self._depends_context is not None:
depends_context = self._depends_context
else:
related_model = model.env[self.related_field.model_name]
depends, depends_context = self.related_field.get_depends(related_model)
return [self.related], depends_context
if not self.compute:
return (), self._depends_context or ()
# determine the functions implementing self.compute
if isinstance(self.compute, str):
funcs = resolve_mro(model, self.compute, callable)
else:
funcs = [self.compute]
# collect depends and depends_context
depends = []
depends_context = list(self._depends_context or ())
for func in funcs:
deps = getattr(func, '_depends', ())
depends.extend(deps(model) if callable(deps) else deps)
depends_context.extend(getattr(func, '_depends_context', ()))
# display_name may depend on context['lang'] (`test_lp1071710`)
if self.automatic and self.name == 'display_name' and model._rec_name:
if model._fields[model._rec_name].base_field.translate:
if 'lang' not in depends_context:
depends_context.append('lang')
return depends, depends_context
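#
# Illustrative sketch (hypothetical fields, not part of this module): the
# dependencies collected above usually come from @api.depends on the
# compute method, e.g.
#
#     total = fields.Float(compute='_compute_total')
#
#     @api.depends('line_ids.price')
#     def _compute_total(self):
#         for record in self:
#             record.total = sum(record.line_ids.mapped('price'))
#
# for which get_depends() returns (['line_ids.price'], []).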
#
# Setup of related fields
#
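# Illustrative sketch (hypothetical fields, not part of this module): a
# related field delegates to a field reached through a chain of relations,
# e.g.
#
#     partner_id = fields.Many2one('res.partner')
#     partner_email = fields.Char(related='partner_id.email')
#
# setup_related() resolves that chain, copies attributes such as 'string'
# and 'help' from the target field, and plugs the computation into
# _compute_related() (and, when applicable, _inverse_related() and
# _search_related()).
#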
def setup_related(self, model):
""" Setup the attributes of a related field. """
assert isinstance(self.related, str), self.related
# determine the chain of fields, and make sure they are all set up
model_name = self.model_name
for name in self.related.split('.'):
field = model.pool[model_name]._fields.get(name)
if field is None:
raise KeyError(
f"Field {name} referenced in related field definition {self} does not exist."
)
if not field._setup_done:
field.setup(model.env[model_name])
model_name = field.comodel_name
self.related_field = field
# check type consistency
if self.type != field.type:
raise TypeError("Type of related field %s is inconsistent with %s" % (self, field))
# determine dependencies, compute, inverse, and search
self.compute = self._compute_related
if self.inherited or not (self.readonly or field.readonly):
self.inverse = self._inverse_related
if field._description_searchable:
# allow searching on self only if the related field is searchable
self.search = self._search_related
# A readonly related field without an inverse method should not have a
# default value, as it does not make sense.
if self.default and self.readonly and not self.inverse:
_logger.warning("Redundant default on %s", self)
# copy attributes from field to self (string, help, etc.)
for attr, prop in self.related_attrs:
# check whether 'attr' is explicitly set on self (from its field
# definition), and ignore its class-level value (only a default)
if attr not in self.__dict__ and prop.startswith('_related_'):
setattr(self, attr, getattr(field, prop))
for attr in field._extra_keys:
if not hasattr(self, attr) and model._valid_field_parameter(self, attr):
setattr(self, attr, getattr(field, attr))
# special cases of inherited fields
if self.inherited:
self.inherited_field = field
if not self.states:
self.states = field.states
if field.required:
self.required = True
# add modules from delegate and target fields; the first one ensures
# that inherited fields introduced via an abstract model (_inherits
# being on the abstract model) are assigned an XML id
delegate_field = model._fields[self.related.split('.')[0]]
self._modules = tuple({*self._modules, *delegate_field._modules, *field._modules})
def traverse_related(self, record):
""" Traverse the fields of the related field `self` except for the last
one, and return it as a pair `(last_record, last_field)`. """
for name in self.related.split('.')[:-1]:
record = first(record[name])
return record, self.related_field
def _compute_related(self, records):
""" Compute the related field ``self`` on ``records``. """
#
# Traverse fields one by one for all records, in order to take advantage
# of prefetching for each field access. In order to clarify the impact
# of the algorithm, consider traversing 'foo.bar' for records a1 and a2,
# where 'foo' is already present in cache for a1, a2. Initially, both a1
# and a2 are marked for prefetching. As the commented code below shows,
# traversing all fields one record at a time will fetch 'bar' one record
# at a time.
#
# b1 = a1.foo # mark b1 for prefetching
# v1 = b1.bar # fetch/compute bar for b1
# b2 = a2.foo # mark b2 for prefetching
# v2 = b2.bar # fetch/compute bar for b2
#
# On the other hand, traversing all records one field at a time ensures
# maximal prefetching for each field access.
#
# b1 = a1.foo # mark b1 for prefetching
# b2 = a2.foo # mark b2 for prefetching
# v1 = b1.bar # fetch/compute bar for b1, b2
# v2 = b2.bar # value already in cache
#
# This difference has a major impact on performance, in particular in
# the case where 'bar' is a computed field that takes advantage of batch
# computation.
#
values = list(records)
for name in self.related.split('.')[:-1]:
try:
values = [first(value[name]) for value in values]
except AccessError as e:
description = records.env['ir.model']._get(records._name).name
raise AccessError(
_("%(previous_message)s\n\nImplicitly accessed through '%(document_kind)s' (%(document_model)s).") % {
'previous_message': e.args[0],
'document_kind': description,
'document_model': records._name,
}
)
# assign final values to records
for record, value in zip(records, values):
record[self.name] = self._process_related(value[self.related_field.name])
def _process_related(self, value):
"""No transformation by default, but allows override."""
return value
def _inverse_related(self, records):
""" Inverse the related field ``self`` on ``records``. """
# store record values, otherwise they may be lost by cache invalidation!
record_value = {record: record[self.name] for record in records}
for record in records:
target, field = self.traverse_related(record)
# update 'target' only if 'record' and 'target' are both real or
# both new (see `test_base_objects.py`, `test_basic`)
if target and bool(target.id) == bool(record.id):
target[field.name] = record_value[record]
def _search_related(self, records, operator, value):
""" Determine the domain to search on field ``self``. """
return [(self.related, operator, value)]
# properties used by setup_related() to copy values from related field
_related_comodel_name = property(attrgetter('comodel_name'))
_related_string = property(attrgetter('string'))
_related_help = property(attrgetter('help'))
_related_groups = property(attrgetter('groups'))
_related_group_operator = property(attrgetter('group_operator'))
@property
def base_field(self):
""" Return the base field of an inherited field, or ``self``. """
return self.inherited_field.base_field if self.inherited_field else self
@property
def groupable(self):
"""
Return whether the field may be used for grouping in :meth:`~odoo.models.BaseModel.read_group`.
"""
return self.store and self.column_type
#
# Company-dependent fields
#
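# Illustrative sketch (hypothetical field, not part of this module): a
# company-dependent field is declared with company_dependent=True; its
# per-company values are stored as 'ir.property' records instead of a
# column, and are read/written through the sudo'ed helpers below.
#
#     standard_price = fields.Float(company_dependent=True)
#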
def _default_company_dependent(self, model):
return model.env['ir.property']._get(self.name, self.model_name)
def _compute_company_dependent(self, records):
# read property as superuser, as the current user may not have access
Property = records.env['ir.property'].sudo()
values = Property._get_multi(self.name, self.model_name, records.ids)
for record in records:
record[self.name] = values.get(record.id)
def _inverse_company_dependent(self, records):
# update property as superuser, as the current user may not have access
Property = records.env['ir.property'].sudo()
values = {
record.id: self.convert_to_write(record[self.name], record)
for record in records
}
Property._set_multi(self.name, self.model_name, values)
def _search_company_dependent(self, records, operator, value):
Property = records.env['ir.property'].sudo()
return Property.search_multi(self.name, self.model_name, operator, value)
#
# Setup of field triggers
#
def resolve_depends(self, registry):
""" Return the dependencies of `self` as a collection of field tuples. """
Model0 = registry[self.model_name]
for dotnames in registry.field_depends[self]:
field_seq = []
model_name = self.model_name
for index, fname in enumerate(dotnames.split('.')):
Model = registry[model_name]
if Model0._transient and not Model._transient:
# modifying fields on regular models should not trigger
# recomputations of fields on transient models
break
try:
field = Model._fields[fname]
except KeyError:
raise ValueError(
f"Wrong @depends on '{self.compute}' (compute method of field {self}). "
f"Dependency field '{fname}' not found in model {model_name}."
)
if field is self and index and not self.recursive:
self.recursive = True
warnings.warn(f"Field {self} should be declared with recursive=True")
field_seq.append(field)
# do not make self trigger itself: for instance, a one2many
# field line_ids with domain [('foo', ...)] will have
# 'line_ids.foo' as a dependency
if not (field is self and not index):
yield tuple(field_seq)
if field.type in ('one2many', 'many2many'):
for inv_field in Model.pool.field_inverses[field]:
yield tuple(field_seq) + (inv_field,)
model_name = field.comodel_name
############################################################################
#
# Field description
#
def get_description(self, env):
""" Return a dictionary that describes the field ``self``. """
desc = {'type': self.type}
for attr, prop in self.description_attrs:
if not prop.startswith('_description_'):
continue
value = getattr(self, prop)
if callable(value):
value = value(env)
if value is not None:
desc[attr] = value
return desc
# properties used by get_description()
_description_store = property(attrgetter('store'))
_description_manual = property(attrgetter('manual'))
_description_related = property(attrgetter('related'))
_description_company_dependent = property(attrgetter('company_dependent'))
_description_readonly = property(attrgetter('readonly'))
_description_required = property(attrgetter('required'))
_description_states = property(attrgetter('states'))
_description_groups = property(attrgetter('groups'))
_description_change_default = property(attrgetter('change_default'))
_description_deprecated = property(attrgetter('deprecated'))
_description_group_operator = property(attrgetter('group_operator'))
def _description_depends(self, env):
return env.registry.field_depends[self]
@property
def _description_searchable(self):
return bool(self.store or self.search)
@property
def _description_sortable(self):
return (self.column_type and self.store) or (self.inherited and self.related_field._description_sortable)
def _description_string(self, env):
if self.string and env.lang:
model_name = self.base_field.model_name
field_string = env['ir.translation'].get_field_string(model_name)
return field_string.get(self.name) or self.string
return self.string
def _description_help(self, env):
if self.help and env.lang:
model_name = self.base_field.model_name
field_help = env['ir.translation'].get_field_help(model_name)
return field_help.get(self.name) or self.help
return self.help
def is_editable(self):
""" Return whether the field can be editable in a view. """
return not self.readonly or self.states and any(
'readonly' in item for items in self.states.values() for item in items
)
############################################################################
#
# Conversion of values
#
def null(self, record):
""" Return the null value for this field in the record format. """
return False
def convert_to_column(self, value, record, values=None, validate=True):
""" Convert ``value`` from the ``write`` format to the SQL format. """
if value is None or value is False:
return None
return pycompat.to_text(value)
def convert_to_cache(self, value, record, validate=True):
""" Convert ``value`` to the cache format; ``value`` may come from an
assignment, or have the format of methods :meth:`BaseModel.read` or
:meth:`BaseModel.write`. If the value represents a recordset, it should
be added for prefetching on ``record``.
:param bool validate: when True, field-specific validation of ``value``
will be performed
"""
return value
def convert_to_record(self, value, record):
""" Convert ``value`` from the cache format to the record format.
If the value represents a recordset, it should share the prefetching of
``record``.
"""
return False if value is None else value
def convert_to_record_multi(self, values, records):
""" Convert a list of values from the cache format to the record format.
Some field classes may override this method to add optimizations for
batch processing.
"""
# spare the method lookup overhead
convert = self.convert_to_record
return [convert(value, records) for value in values]
def convert_to_read(self, value, record, use_name_get=True):
""" Convert ``value`` from the record format to the format returned by
method :meth:`BaseModel.read`.
:param bool use_name_get: when True, the value's display name will be
computed using :meth:`BaseModel.name_get`, if relevant for the field
"""
return False if value is None else value
def convert_to_write(self, value, record):
""" Convert ``value`` from any format to the format of method
:meth:`BaseModel.write`.
"""
cache_value = self.convert_to_cache(value, record, validate=False)
record_value = self.convert_to_record(cache_value, record)
return self.convert_to_read(record_value, record)
def convert_to_onchange(self, value, record, names):
""" Convert ``value`` from the record format to the format returned by
method :meth:`BaseModel.onchange`.
:param names: a tree of field names (for relational fields only)
"""
return self.convert_to_read(value, record)
def convert_to_export(self, value, record):
""" Convert ``value`` from the record format to the export format. """
if not value:
return ''
return value
def convert_to_display_name(self, value, record):
""" Convert ``value`` from the record format to a suitable display name. """
return ustr(value)
############################################################################
#
# Update database schema
#
def update_db(self, model, columns):
""" Update the database schema to implement this field.
:param model: an instance of the field's model
:param columns: a dict mapping column names to their configuration in database
:return: ``True`` if the field must be recomputed on existing rows
"""
if not self.column_type:
return
column = columns.get(self.name)
# create/update the column, not null constraint; the index will be
# managed by registry.check_indexes()
self.update_db_column(model, column)
self.update_db_notnull(model, column)
# optimization for computing simple related fields like 'foo_id.bar'
if (
not column
and self.related and self.related.count('.') == 1
and self.related_field.store and not self.related_field.compute
and not (self.related_field.type == 'binary' and self.related_field.attachment)
and self.related_field.type not in ('one2many', 'many2many')
):
join_field = model._fields[self.related.split('.')[0]]
if (
join_field.type == 'many2one'
and join_field.store and not join_field.compute
):
model.pool.post_init(self.update_db_related, model)
# discard the "classical" computation
return False
return not column
def update_db_column(self, model, column):
""" Create/update the column corresponding to ``self``.
:param model: an instance of the field's model
:param column: the column's configuration (dict) if it exists, or ``None``
"""
if not column:
# the column does not exist, create it
sql.create_column(model._cr, model._table, self.name, self.column_type[1], self.string)
return
if column['udt_name'] == self.column_type[0]:
return
if column['udt_name'] in self.column_cast_from:
sql.convert_column(model._cr, model._table, self.name, self.column_type[1])
else:
newname = (self.name + '_moved{}').format
i = 0
while sql.column_exists(model._cr, model._table, newname(i)):
i += 1
if column['is_nullable'] == 'NO':
sql.drop_not_null(model._cr, model._table, self.name)
sql.rename_column(model._cr, model._table, self.name, newname(i))
sql.create_column(model._cr, model._table, self.name, self.column_type[1], self.string)
def update_db_notnull(self, model, column):
""" Add or remove the NOT NULL constraint on ``self``.
:param model: an instance of the field's model
:param column: the column's configuration (dict) if it exists, or ``None``
"""
has_notnull = column and column['is_nullable'] == 'NO'
if not column or (self.required and not has_notnull):
# the column is new or it becomes required; initialize its values
if model._table_has_rows():
model._init_column(self.name)
if self.required and not has_notnull:
# _init_column may delay computations in post-init phase
@model.pool.post_init
def add_not_null():
# flush values before adding NOT NULL constraint
model.flush([self.name])
model.pool.post_constraint(apply_required, model, self.name)
elif not self.required and has_notnull:
sql.drop_not_null(model._cr, model._table, self.name)
def update_db_related(self, model):
""" Compute a stored related field directly in SQL. """
comodel = model.env[self.related_field.model_name]
join_field, comodel_field = self.related.split('.')
model.env.cr.execute("""
UPDATE "{model_table}" AS x
SET "{model_field}" = y."{comodel_field}"
FROM "{comodel_table}" AS y
WHERE x."{join_field}" = y.id
""".format(
model_table=model._table,
model_field=self.name,
comodel_table=comodel._table,
comodel_field=comodel_field,
join_field=join_field,
))
############################################################################
#
# Alternatively stored fields: if fields don't have a `column_type` (not
# stored as regular db columns) they go through a read/create/write
# protocol instead
#
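# For instance, relational fields such as One2many and Many2many have no
# column_type: their values live in relation tables or on the comodel, so
# the subclasses defined further below implement read() and write()
# themselves instead of relying on a column update.
#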
def read(self, records):
""" Read the value of ``self`` on ``records``, and store it in cache. """
raise NotImplementedError("Method read() undefined on %s" % self)
def create(self, record_values):
""" Write the value of ``self`` on the given records, which have just
been created.
:param record_values: a list of pairs ``(record, value)``, where
``value`` is in the format of method :meth:`BaseModel.write`
"""
for record, value in record_values:
self.write(record, value)
def write(self, records, value):
""" Write the value of ``self`` on ``records``. This method must update
the cache and prepare database updates.
:param value: a value in any format
:return: the subset of `records` that have been modified
"""
# discard recomputation of self on records
records.env.remove_to_compute(self, records)
# update the cache, and discard the records that are not modified
cache = records.env.cache
cache_value = self.convert_to_cache(value, records)
records = cache.get_records_different_from(records, self, cache_value)
if not records:
return records
cache.update(records, self, [cache_value] * len(records))
# update towrite
if self.store:
towrite = records.env.all.towrite[self.model_name]
record = records[:1]
write_value = self.convert_to_write(cache_value, record)
column_value = self.convert_to_column(write_value, record)
for record in records.filtered('id'):
towrite[record.id][self.name] = column_value
return records
############################################################################
#
# Descriptor methods
#
def __get__(self, record, owner):
""" return the value of field ``self`` on ``record`` """
if record is None:
return self # the field is accessed through the owner class
if not record._ids:
# null record -> return the null value for this field
value = self.convert_to_cache(False, record, validate=False)
return self.convert_to_record(value, record)
env = record.env
# only a single record may be accessed
record.ensure_one()
if self.compute and self.store:
# process pending computations
self.recompute(record)
try:
value = env.cache.get(record, self)
except KeyError:
# behavior in case of cache miss:
#
# on a real record:
# stored -> fetch from database (computation done above)
# not stored and computed -> compute
# not stored and not computed -> default
#
# on a new record w/ origin:
# stored and not (computed and readonly) -> fetch from origin
# stored and computed and readonly -> compute
# not stored and computed -> compute
# not stored and not computed -> default
#
# on a new record w/o origin:
# stored and computed -> compute
# stored and not computed -> new delegate or default
# not stored and computed -> compute
# not stored and not computed -> default
#
if self.store and record.id:
# real record: fetch from database
recs = record._in_cache_without(self)
try:
recs._fetch_field(self)
except AccessError:
record._fetch_field(self)
if not env.cache.contains(record, self):
raise MissingError("\n".join([
_("Record does not exist or has been deleted."),
_("(Record: %s, User: %s)") % (record, env.uid),
]))
value = env.cache.get(record, self)
elif self.store and record._origin and not (self.compute and self.readonly):
# new record with origin: fetch from origin
value = self.convert_to_cache(record._origin[self.name], record)
env.cache.set(record, self, value)
elif self.compute:
# non-stored field or new record without origin: compute
if env.is_protected(self, record):
value = self.convert_to_cache(False, record, validate=False)
env.cache.set(record, self, value)
else:
recs = record if self.recursive else record._in_cache_without(self)
try:
self.compute_value(recs)
except (AccessError, MissingError):
self.compute_value(record)
try:
value = env.cache.get(record, self)
except CacheMiss:
if self.readonly and not self.store:
raise ValueError("Compute method failed to assign %s.%s" % (record, self.name))
# fallback to null value if compute gives nothing
value = self.convert_to_cache(False, record, validate=False)
env.cache.set(record, self, value)
elif self.type == 'many2one' and self.delegate and not record.id:
# parent record of a new record: new record, with the same
# values as record for the corresponding inherited fields
def is_inherited_field(name):
field = record._fields[name]
return field.inherited and field.related.split('.')[0] == self.name
parent = record.env[self.comodel_name].new({
name: value
for name, value in record._cache.items()
if is_inherited_field(name)
})
# in case the delegate field has inverse one2many fields, this
# updates the inverse fields as well
record._update_cache({self.name: parent}, validate=False)
value = env.cache.get(record, self)
else:
# non-stored field or stored field on new record: default value
value = self.convert_to_cache(False, record, validate=False)
env.cache.set(record, self, value)
defaults = record.default_get([self.name])
if self.name in defaults:
# The null value above is necessary to convert x2many field
# values. For instance, converting [(Command.LINK, id)]
# accesses the field's current value, then adds the given
# id. Without an initial value, the conversion ends up here
# to determine the field's value, and generates an infinite
# recursion.
value = self.convert_to_cache(defaults[self.name], record)
env.cache.set(record, self, value)
return self.convert_to_record(value, record)
def mapped(self, records):
""" Return the values of ``self`` for ``records``, either as a list
(scalar fields), or as a recordset (relational fields).
This method is meant to be used internally and has very little benefit
over a simple call to `~odoo.models.BaseModel.mapped()` on a recordset.
"""
if self.name == 'id':
# not stored in cache
return list(records._ids)
if self.compute and self.store:
# process pending computations
self.recompute(records)
# retrieve values in cache, and fetch missing ones
vals = records.env.cache.get_until_miss(records, self)
while len(vals) < len(records):
# It is important to construct a 'remaining' recordset with the
# _prefetch_ids of the original recordset, in order to prefetch as
# many records as possible. If not done this way, scenarios such as
# [rec.line_ids.mapped('name') for rec in recs] would generate one
# query per record in `recs`!
remaining = records._browse(records.env, records[len(vals):]._ids, records._prefetch_ids)
self.__get__(first(remaining), type(remaining))
vals += records.env.cache.get_until_miss(remaining, self)
return self.convert_to_record_multi(vals, records)
def __set__(self, records, value):
""" set the value of field ``self`` on ``records`` """
protected_ids = []
new_ids = []
other_ids = []
for record_id in records._ids:
if record_id in records.env._protected.get(self, ()):
protected_ids.append(record_id)
elif not record_id:
new_ids.append(record_id)
else:
other_ids.append(record_id)
if protected_ids:
# records being computed: no business logic, no recomputation
protected_records = records.browse(protected_ids)
self.write(protected_records, value)
if new_ids:
# new records: no business logic
new_records = records.browse(new_ids)
with records.env.protecting(records.pool.field_computed.get(self, [self]), records):
if self.relational:
new_records.modified([self.name], before=True)
self.write(new_records, value)
new_records.modified([self.name])
if self.inherited:
# special case: also assign parent records if they are new
parents = records[self.related.split('.')[0]]
parents.filtered(lambda r: not r.id)[self.name] = value
if other_ids:
# base case: full business logic
records = records.browse(other_ids)
write_value = self.convert_to_write(value, records)
records.write({self.name: write_value})
############################################################################
#
# Computation of field values
#
def recompute(self, records):
""" Process the pending computations of ``self`` on ``records``. This
should be called only if ``self`` is computed and stored.
"""
to_compute_ids = records.env.all.tocompute.get(self)
if not to_compute_ids:
return
if self.recursive:
for record in records:
if record.id in to_compute_ids:
self.compute_value(record)
return
for record in records:
if record.id in to_compute_ids:
ids = expand_ids(record.id, to_compute_ids)
recs = record.browse(itertools.islice(ids, PREFETCH_MAX))
try:
self.compute_value(recs)
except (AccessError, MissingError):
self.compute_value(record)
def compute_value(self, records):
""" Invoke the compute method on ``records``; the results are in cache. """
env = records.env
if self.compute_sudo:
records = records.sudo()
fields = records.pool.field_computed[self]
# Just in case the compute method does not assign a value, we already
# mark the computation as done. This is also necessary if the compute
# method accesses the old value of the field: the field will be fetched
# with _read(), which will flush() it. If the field is still to compute,
# the latter flush() will recursively compute this field!
for field in fields:
if field.store:
env.remove_to_compute(field, records)
try:
with records.env.protecting(fields, records):
records._compute_field_value(self)
except Exception:
for field in fields:
if field.store:
env.add_to_compute(field, records)
raise
def determine_inverse(self, records):
""" Given the value of ``self`` on ``records``, inverse the computation. """
determine(self.inverse, records)
def determine_domain(self, records, operator, value):
""" Return a domain representing a condition on ``self``. """
return determine(self.search, records, operator, value)
class Boolean(Field):
""" Encapsulates a :class:`bool`. """
type = 'boolean'
column_type = ('bool', 'bool')
def convert_to_column(self, value, record, values=None, validate=True):
return bool(value)
def convert_to_cache(self, value, record, validate=True):
return bool(value)
def convert_to_export(self, value, record):
return value
class Integer(Field):
""" Encapsulates an :class:`int`. """
type = 'integer'
column_type = ('int4', 'int4')
group_operator = 'sum'
def convert_to_column(self, value, record, values=None, validate=True):
return int(value or 0)
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, dict):
# special case, when an integer field is used as inverse for a one2many
return value.get('id', None)
return int(value or 0)
def convert_to_record(self, value, record):
return value or 0
def convert_to_read(self, value, record, use_name_get=True):
# Integer values greater than 2^31-1 are not supported in pure XMLRPC,
# so we have to pass them as floats :-(
if value and value > MAXINT:
return float(value)
return value
def _update(self, records, value):
# special case, when an integer field is used as inverse for a one2many
cache = records.env.cache
for record in records:
cache.set(record, self, value.id or 0)
def convert_to_export(self, value, record):
if value or value == 0:
return value
return ''
class Float(Field):
""" Encapsulates a :class:`float`.
The precision digits are given by the (optional) ``digits`` attribute.
:param digits: a pair (total, decimal) or a string referencing a
:class:`~odoo.addons.base.models.decimal_precision.DecimalPrecision` record name.
:type digits: tuple(int,int) or str
When a float is a quantity associated with a unit of measure, it is important
to use the right tool to compare or round values with the correct precision.
The Float class provides some static methods for this purpose:
:func:`~odoo.fields.Float.round()` to round a float with the given precision.
:func:`~odoo.fields.Float.is_zero()` to check if a float equals zero at the given precision.
:func:`~odoo.fields.Float.compare()` to compare two floats at the given precision.
.. admonition:: Example
To round a quantity with the precision of the unit of measure::
fields.Float.round(self.product_uom_qty, precision_rounding=self.product_uom_id.rounding)
To check if the quantity is zero with the precision of the unit of measure::
fields.Float.is_zero(self.product_uom_qty, precision_rounding=self.product_uom_id.rounding)
To compare two quantities::
fields.Float.compare(self.product_uom_qty, self.qty_done, precision_rounding=self.product_uom_id.rounding)
The compare helper uses the __cmp__ semantics for historical reasons; therefore
the proper, idiomatic way to use this helper is:
if result == 0, the first and second floats are equal
if result < 0, the first float is lower than the second
if result > 0, the first float is greater than the second
"""
type = 'float'
column_cast_from = ('int4', 'numeric', 'float8')
_digits = None # digits argument passed to class initializer
group_operator = 'sum'
def __init__(self, string=Default, digits=Default, **kwargs):
super(Float, self).__init__(string=string, _digits=digits, **kwargs)
@property
def column_type(self):
# Explicit support for "falsy" digits (0, False) to indicate a NUMERIC
# field with no fixed precision. The values are saved in the database
# with all significant digits.
# FLOAT8 type is still the default when there is no precision because it
# is faster for most operations (sums, etc.)
return ('numeric', 'numeric') if self._digits is not None else \
('float8', 'double precision')
def get_digits(self, env):
if isinstance(self._digits, str):
precision = env['decimal.precision'].precision_get(self._digits)
return 16, precision
else:
return self._digits
_related__digits = property(attrgetter('_digits'))
def _description_digits(self, env):
return self.get_digits(env)
def convert_to_column(self, value, record, values=None, validate=True):
result = float(value or 0.0)
digits = self.get_digits(record.env)
if digits:
precision, scale = digits
result = float_repr(float_round(result, precision_digits=scale), precision_digits=scale)
return result
def convert_to_cache(self, value, record, validate=True):
# apply rounding here, otherwise value in cache may be wrong!
value = float(value or 0.0)
if not validate:
return value
digits = self.get_digits(record.env)
return float_round(value, precision_digits=digits[1]) if digits else value
def convert_to_record(self, value, record):
return value or 0.0
def convert_to_export(self, value, record):
if value or value == 0.0:
return value
return ''
round = staticmethod(float_round)
is_zero = staticmethod(float_is_zero)
compare = staticmethod(float_compare)
class Monetary(Field):
""" Encapsulates a :class:`float` expressed in a given
:class:`res_currency<odoo.addons.base.models.res_currency.Currency>`.
The decimal precision and currency symbol are taken from the ``currency_field`` attribute.
:param str currency_field: name of the :class:`Many2one` field
holding the :class:`res_currency <odoo.addons.base.models.res_currency.Currency>`
this monetary field is expressed in (default: ``'currency_id'``)
"""
type = 'monetary'
write_sequence = 10
column_type = ('numeric', 'numeric')
column_cast_from = ('float8',)
currency_field = None
group_operator = 'sum'
def __init__(self, string=Default, currency_field=Default, **kwargs):
super(Monetary, self).__init__(string=string, currency_field=currency_field, **kwargs)
def _description_currency_field(self, env):
return self.get_currency_field(env[self.model_name])
def get_currency_field(self, model):
""" Return the name of the currency field. """
return self.currency_field or (
'currency_id' if 'currency_id' in model._fields else
'x_currency_id' if 'x_currency_id' in model._fields else
None
)
def setup_nonrelated(self, model):
super().setup_nonrelated(model)
assert self.get_currency_field(model) in model._fields, \
"Field %s with unknown currency_field %r" % (self, self.get_currency_field(model))
def setup_related(self, model):
super().setup_related(model)
if self.inherited:
self.currency_field = self.related_field.get_currency_field(model.env[self.related_field.model_name])
assert self.get_currency_field(model) in model._fields, \
"Field %s with unknown currency_field %r" % (self, self.get_currency_field(model))
def convert_to_column(self, value, record, values=None, validate=True):
# retrieve currency from values or record
currency_field_name = self.get_currency_field(record)
currency_field = record._fields[currency_field_name]
if values and currency_field_name in values:
dummy = record.new({currency_field_name: values[currency_field_name]})
currency = dummy[currency_field_name]
elif values and currency_field.related and currency_field.related.split('.')[0] in values:
related_field_name = currency_field.related.split('.')[0]
dummy = record.new({related_field_name: values[related_field_name]})
currency = dummy[currency_field_name]
else:
# Note: this is wrong if 'record' is several records with different
# currencies, which is functional nonsense and should not happen
# BEWARE: do not prefetch other fields, because 'value' may be in
# cache, and would be overridden by the value read from database!
currency = record[:1].with_context(prefetch_fields=False)[currency_field_name]
value = float(value or 0.0)
if currency:
return float_repr(currency.round(value), currency.decimal_places)
return value
def convert_to_cache(self, value, record, validate=True):
# cache format: float
value = float(value or 0.0)
if value and validate:
# FIXME @rco-odoo: currency may not be already initialized if it is
# a function or related field!
# BEWARE: do not prefetch other fields, because 'value' may be in
# cache, and would be overridden by the value read from database!
currency_field = self.get_currency_field(record)
currency = record.sudo().with_context(prefetch_fields=False)[currency_field]
if len(currency) > 1:
raise ValueError("Got multiple currencies while assigning values of monetary field %s" % str(self))
elif currency:
value = currency.round(value)
return value
def convert_to_record(self, value, record):
return value or 0.0
def convert_to_read(self, value, record, use_name_get=True):
return value
def convert_to_write(self, value, record):
return value
class _String(Field):
""" Abstract class for string fields. """
translate = False # whether the field is translated
prefetch = None
def __init__(self, string=Default, **kwargs):
# translate is either True, False, or a callable
if 'translate' in kwargs and not callable(kwargs['translate']):
kwargs['translate'] = bool(kwargs['translate'])
super(_String, self).__init__(string=string, **kwargs)
def _setup_attrs(self, model_class, name):
super()._setup_attrs(model_class, name)
if self.prefetch is None:
# do not prefetch complex translated fields by default
self.prefetch = not callable(self.translate)
_related_translate = property(attrgetter('translate'))
def _description_translate(self, env):
return bool(self.translate)
def get_trans_terms(self, value):
""" Return the sequence of terms to translate found in `value`. """
if not callable(self.translate):
return [value] if value else []
terms = []
self.translate(terms.append, value)
return terms
def get_trans_func(self, records):
""" Return a translation function `translate` for `self` on the given
records; the function call `translate(record_id, value)` translates the
field value to the language given by the environment of `records`.
"""
if callable(self.translate):
rec_src_trans = records.env['ir.translation']._get_terms_translations(self, records)
def translate(record_id, value):
src_trans = rec_src_trans[record_id]
return self.translate(src_trans.get, value)
else:
rec_trans = records.env['ir.translation']._get_ids(
'%s,%s' % (self.model_name, self.name), 'model', records.env.lang, records.ids)
def translate(record_id, value):
return rec_trans.get(record_id) or value
return translate
def write(self, records, value):
# discard recomputation of self on records
records.env.remove_to_compute(self, records)
# update the cache, and discard the records that are not modified
cache = records.env.cache
cache_value = self.convert_to_cache(value, records)
records = cache.get_records_different_from(records, self, cache_value)
if not records:
return records
cache.update(records, self, [cache_value] * len(records))
if not self.store:
return records
real_recs = records.filtered('id')
if not real_recs._ids:
return records
update_column = True
update_trans = False
single_lang = len(records.env['res.lang'].get_installed()) <= 1
if self.translate:
lang = records.env.lang or None # used in _update_translations below
if single_lang:
# a single language is installed
update_trans = True
elif callable(self.translate) or lang == 'en_US':
# update the source and synchronize translations
update_column = True
update_trans = True
elif lang != 'en_US' and lang is not None:
# update only the translations, except when emptying (then also clear the column)
update_column = not cache_value
update_trans = True
# else: lang = None
# update towrite if modifying the source
if update_column:
towrite = records.env.all.towrite[self.model_name]
for rid in real_recs._ids:
# cache_value is already in database format
towrite[rid][self.name] = cache_value
if self.translate is True and cache_value:
tname = "%s,%s" % (records._name, self.name)
records.env['ir.translation']._set_source(tname, real_recs._ids, value)
if self.translate:
# invalidate the field in the other languages
cache.invalidate([(self, records.ids)])
cache.update(records, self, [cache_value] * len(records))
if update_trans:
if callable(self.translate):
# the source value of self has been updated, synchronize
# translated terms when possible
records.env['ir.translation']._sync_terms_translations(self, real_recs)
else:
# update translations
value = self.convert_to_column(value, records)
source_recs = real_recs.with_context(lang=None)
source_value = first(source_recs)[self.name]
if not source_value:
source_recs[self.name] = value
source_value = value
tname = "%s,%s" % (self.model_name, self.name)
if not value:
records.env['ir.translation'].search([
('name', '=', tname),
('type', '=', 'model'),
('res_id', 'in', real_recs._ids)
]).unlink()
elif single_lang:
records.env['ir.translation']._update_translations([dict(
src=source_value,
value=value,
name=tname,
lang=lang,
type='model',
state='translated',
res_id=res_id) for res_id in real_recs._ids])
else:
records.env['ir.translation']._set_ids(
tname, 'model', lang, real_recs._ids, value, source_value,
)
return records
class Char(_String):
""" Basic string field, can be length-limited, usually displayed as a
single-line string in clients.
:param int size: the maximum size of values stored for that field
:param bool trim: states whether the value is trimmed or not (by default,
``True``). Note that the trim operation is applied only by the web client.
:param translate: enable the translation of the field's values; use
``translate=True`` to translate field values as a whole; ``translate``
may also be a callable such that ``translate(callback, value)``
translates ``value`` by using ``callback(term)`` to retrieve the
translation of terms.
:type translate: bool or callable
"""
type = 'char'
column_cast_from = ('text',)
size = None # maximum size of values (deprecated)
trim = True # whether value is trimmed (only by web client)
def _setup_attrs(self, model_class, name):
super()._setup_attrs(model_class, name)
assert self.size is None or isinstance(self.size, int), \
"Char field %s with non-integer size %r" % (self, self.size)
@property
def column_type(self):
return ('varchar', pg_varchar(self.size))
def update_db_column(self, model, column):
if (
column and column['udt_name'] == 'varchar' and column['character_maximum_length'] and
(self.size is None or column['character_maximum_length'] < self.size)
):
# the column's varchar size does not match self.size; convert it
sql.convert_column(model._cr, model._table, self.name, self.column_type[1])
super(Char, self).update_db_column(model, column)
_related_size = property(attrgetter('size'))
_related_trim = property(attrgetter('trim'))
_description_size = property(attrgetter('size'))
_description_trim = property(attrgetter('trim'))
def convert_to_column(self, value, record, values=None, validate=True):
if value is None or value is False:
return None
# we need to convert the string to a unicode object to be able
# to evaluate its length (and possibly truncate it) reliably
return pycompat.to_text(value)[:self.size]
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return None
return pycompat.to_text(value)[:self.size]
class Text(_String):
""" Very similar to :class:`Char` but used for longer contents, does not
have a size and usually displayed as a multiline text box.
:param translate: enable the translation of the field's values; use
``translate=True`` to translate field values as a whole; ``translate``
may also be a callable such that ``translate(callback, value)``
translates ``value`` by using ``callback(term)`` to retrieve the
translation of terms.
:type translate: bool or callable
"""
type = 'text'
column_type = ('text', 'text')
column_cast_from = ('varchar',)
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return None
return ustr(value)
class Html(_String):
""" Encapsulates an html code content.
:param bool sanitize: whether value must be sanitized (default: ``True``)
:param bool sanitize_tags: whether to sanitize tags
(only a white list of tags is accepted, default: ``True``)
:param bool sanitize_attributes: whether to sanitize attributes
(only a white list of attributes is accepted, default: ``True``)
:param bool sanitize_style: whether to sanitize style attributes (default: ``False``)
:param bool strip_style: whether to strip style attributes
(removed and therefore not sanitized, default: ``False``)
:param bool strip_classes: whether to strip classes attributes (default: ``False``)
"""
type = 'html'
column_type = ('text', 'text')
column_cast_from = ('varchar',)
sanitize = True # whether value must be sanitized
sanitize_tags = True  # whether to sanitize tags (only a white list of tags is accepted)
sanitize_attributes = True # whether to sanitize attributes (only a white list of attributes is accepted)
sanitize_style = False # whether to sanitize style attributes
sanitize_form = True # whether to sanitize forms
strip_style = False # whether to strip style attributes (removed and therefore not sanitized)
strip_classes = False # whether to strip classes attributes
def _get_attrs(self, model_class, name):
# called by _setup_attrs(), working together with _String._setup_attrs()
attrs = super()._get_attrs(model_class, name)
# Translated sanitized html fields must use html_translate or a callable.
if attrs.get('translate') is True and attrs.get('sanitize', True):
attrs['translate'] = html_translate
return attrs
_related_sanitize = property(attrgetter('sanitize'))
_related_sanitize_tags = property(attrgetter('sanitize_tags'))
_related_sanitize_attributes = property(attrgetter('sanitize_attributes'))
_related_sanitize_style = property(attrgetter('sanitize_style'))
_related_strip_style = property(attrgetter('strip_style'))
_related_strip_classes = property(attrgetter('strip_classes'))
_description_sanitize = property(attrgetter('sanitize'))
_description_sanitize_tags = property(attrgetter('sanitize_tags'))
_description_sanitize_attributes = property(attrgetter('sanitize_attributes'))
_description_sanitize_style = property(attrgetter('sanitize_style'))
_description_strip_style = property(attrgetter('strip_style'))
_description_strip_classes = property(attrgetter('strip_classes'))
def convert_to_column(self, value, record, values=None, validate=True):
if value is None or value is False:
return None
if self.sanitize:
return html_sanitize(
value, silent=True,
sanitize_tags=self.sanitize_tags,
sanitize_attributes=self.sanitize_attributes,
sanitize_style=self.sanitize_style,
sanitize_form=self.sanitize_form,
strip_style=self.strip_style,
strip_classes=self.strip_classes)
return value
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return None
if validate and self.sanitize:
return html_sanitize(
value, silent=True,
sanitize_tags=self.sanitize_tags,
sanitize_attributes=self.sanitize_attributes,
sanitize_style=self.sanitize_style,
sanitize_form=self.sanitize_form,
strip_style=self.strip_style,
strip_classes=self.strip_classes)
return value
def convert_to_record(self, value, record):
r = super().convert_to_record(value, record)
if isinstance(r, bytes):
r = r.decode()
return r and Markup(r)
def convert_to_read(self, value, record, use_name_get=True):
r = super().convert_to_read(value, record, use_name_get)
if isinstance(r, bytes):
r = r.decode()
return r and Markup(r)
def get_trans_terms(self, value):
# ensure the translation terms are stringified, otherwise we can break the PO file
return list(map(str, super().get_trans_terms(value)))
class Date(Field):
""" Encapsulates a python :class:`date <datetime.date>` object. """
type = 'date'
column_type = ('date', 'date')
column_cast_from = ('timestamp',)
start_of = staticmethod(date_utils.start_of)
end_of = staticmethod(date_utils.end_of)
add = staticmethod(date_utils.add)
subtract = staticmethod(date_utils.subtract)
@staticmethod
def today(*args):
"""Return the current day in the format expected by the ORM.
.. note:: This function may be used to compute default values.
"""
return date.today()
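# Illustrative usage (hypothetical field, not part of this module):
#
#     date_order = fields.Date(default=fields.Date.today)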
@staticmethod
def context_today(record, timestamp=None):
"""Return the current date as seen in the client's timezone in a format
fit for date fields.
.. note:: This method may be used to compute default values.
:param record: recordset from which the timezone will be obtained.
:param datetime timestamp: optional datetime value to use instead of
the current date and time (must be a datetime, regular dates
can't be converted between timezones).
:rtype: date
"""
today = timestamp or datetime.now()
context_today = None
tz_name = record._context.get('tz') or record.env.user.tz
if tz_name:
try:
today_utc = pytz.timezone('UTC').localize(today, is_dst=False) # UTC = no DST
context_today = today_utc.astimezone(pytz.timezone(tz_name))
except Exception:
_logger.debug("failed to compute context/client-specific today date, using UTC value for `today`",
exc_info=True)
return (context_today or today).date()
@staticmethod
def to_date(value):
"""Attempt to convert ``value`` to a :class:`date` object.
.. warning::
If a datetime object is given as value,
it will be converted to a date object and all
datetime-specific information will be lost (HMS, TZ, ...).
:param value: value to convert.
:type value: str or date or datetime
:return: an object representing ``value``.
:rtype: date or None
"""
if not value:
return None
if isinstance(value, date):
if isinstance(value, datetime):
return value.date()
return value
value = value[:DATE_LENGTH]
return datetime.strptime(value, DATE_FORMAT).date()
# kept for backwards compatibility; `from_string` is considered deprecated and
# will probably be removed after V12
from_string = to_date
@staticmethod
def to_string(value):
"""
Convert a :class:`date` or :class:`datetime` object to a string.
:param value: value to convert.
:return: a string representing ``value`` in the server's date format; if ``value`` is of
type :class:`datetime`, the hours, minutes, seconds and tzinfo are truncated.
:rtype: str
"""
return value.strftime(DATE_FORMAT) if value else False
def convert_to_cache(self, value, record, validate=True):
if not value:
return None
if isinstance(value, datetime):
# TODO: better fix data files (crm demo data)
value = value.date()
# raise TypeError("%s (field %s) must be string or date, not datetime." % (value, self))
return self.to_date(value)
def convert_to_export(self, value, record):
if not value:
return ''
return self.from_string(value)
class Datetime(Field):
""" Encapsulates a python :class:`datetime <datetime.datetime>` object. """
type = 'datetime'
column_type = ('timestamp', 'timestamp')
column_cast_from = ('date',)
start_of = staticmethod(date_utils.start_of)
end_of = staticmethod(date_utils.end_of)
add = staticmethod(date_utils.add)
subtract = staticmethod(date_utils.subtract)
@staticmethod
def now(*args):
"""Return the current day and time in the format expected by the ORM.
.. note:: This function may be used to compute default values.
"""
# microseconds must be annihilated as they don't comply with the server datetime format
return datetime.now().replace(microsecond=0)
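# Illustrative usage (hypothetical field, not part of this module):
#
#     date_done = fields.Datetime(default=fields.Datetime.now)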
@staticmethod
def today(*args):
"""Return the current day, at midnight (00:00:00)."""
return Datetime.now().replace(hour=0, minute=0, second=0)
@staticmethod
def context_timestamp(record, timestamp):
"""Return the given timestamp converted to the client's timezone.
.. note:: This method is *not* meant for use as a default initializer,
because datetime fields are automatically converted upon
display on client side. For default values, :meth:`now`
should be used instead.
:param record: recordset from which the timezone will be obtained.
:param datetime timestamp: naive datetime value (expressed in UTC)
to be converted to the client timezone.
:return: timestamp converted to timezone-aware datetime in context timezone.
:rtype: datetime
"""
assert isinstance(timestamp, datetime), 'Datetime instance expected'
tz_name = record._context.get('tz') or record.env.user.tz
utc_timestamp = pytz.utc.localize(timestamp, is_dst=False) # UTC = no DST
if tz_name:
try:
context_tz = pytz.timezone(tz_name)
return utc_timestamp.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific timestamp, "
"using the UTC value",
exc_info=True)
return utc_timestamp
@staticmethod
def to_datetime(value):
"""Convert an ORM ``value`` into a :class:`datetime` value.
:param value: value to convert.
:type value: str or date or datetime
:return: an object representing ``value``.
:rtype: datetime or None
"""
if not value:
return None
if isinstance(value, date):
if isinstance(value, datetime):
if value.tzinfo:
raise ValueError("Datetime field expects a naive datetime: %s" % value)
return value
return datetime.combine(value, time.min)
# TODO: fix data files
return datetime.strptime(value, DATETIME_FORMAT[:len(value)-2])
# kept for backwards compatibility; `from_string` is considered deprecated and
# will probably be removed after V12
from_string = to_datetime
@staticmethod
def to_string(value):
"""Convert a :class:`datetime` or :class:`date` object to a string.
:param value: value to convert.
:type value: datetime or date
:return: a string representing ``value`` in the server's datetime format,
if ``value`` is of type :class:`date`,
the time portion will be midnight (00:00:00).
:rtype: str
"""
return value.strftime(DATETIME_FORMAT) if value else False
def convert_to_cache(self, value, record, validate=True):
return self.to_datetime(value)
def convert_to_export(self, value, record):
if not value:
return ''
value = self.convert_to_display_name(value, record)
return self.from_string(value)
def convert_to_display_name(self, value, record):
assert record, 'Record expected'
return Datetime.to_string(Datetime.context_timestamp(record, Datetime.from_string(value)))
# http://initd.org/psycopg/docs/usage.html#binary-adaptation
# Received data is returned as buffer (in Python 2) or memoryview (in Python 3).
_BINARY = memoryview
class Binary(Field):
"""Encapsulates a binary content (e.g. a file).
:param bool attachment: whether the field should be stored as `ir_attachment`
or in a column of the model's table (default: ``True``).
"""
type = 'binary'
prefetch = False # not prefetched by default
_depends_context = ('bin_size',) # depends on context (content or size)
attachment = True # whether value is stored in attachment
@property
def column_type(self):
return None if self.attachment else ('bytea', 'bytea')
def _get_attrs(self, model_class, name):
attrs = super()._get_attrs(model_class, name)
if not attrs.get('store', True):
attrs['attachment'] = False
return attrs
_description_attachment = property(attrgetter('attachment'))
def convert_to_column(self, value, record, values=None, validate=True):
# Binary values may be byte strings (python 2.6 byte array), but
# the legacy OpenERP convention is to transfer and store binaries
# as base64-encoded strings. The base64 string may be provided as a
# unicode in some circumstances, hence the str() cast here.
# This str() coercion will only work for pure ASCII unicode strings,
        # on purpose - non-base64 data must be passed as 8-bit byte strings.
if not value:
return None
# Detect if the binary content is an SVG for restricting its upload
# only to system users.
magic_bytes = {
b'P', # first 6 bits of '<' (0x3C) b64 encoded
b'<', # plaintext XML tag opening
}
if isinstance(value, str):
value = value.encode()
if value[:1] in magic_bytes:
try:
decoded_value = base64.b64decode(value.translate(None, delete=b'\r\n'), validate=True)
except binascii.Error:
decoded_value = value
# Full mimetype detection
if (guess_mimetype(decoded_value).startswith('image/svg') and
not record.env.is_system()):
raise UserError(_("Only admins can upload SVG files."))
if isinstance(value, bytes):
return psycopg2.Binary(value)
try:
return psycopg2.Binary(str(value).encode('ascii'))
except UnicodeEncodeError:
raise UserError(_("ASCII characters are required for %s in %s") % (value, self.name))
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, _BINARY):
return bytes(value)
if isinstance(value, str):
# the cache must contain bytes or memoryview, but sometimes a string
# is given when assigning a binary field (test `TestFileSeparator`)
return value.encode()
if isinstance(value, int) and \
(record._context.get('bin_size') or
record._context.get('bin_size_' + self.name)):
# If the client requests only the size of the field, we return that
# instead of the content. Presumably a separate request will be done
# to read the actual content, if necessary.
value = human_size(value)
# human_size can return False (-> None) or a string (-> encoded)
return value.encode() if value else None
return None if value is False else value
def convert_to_record(self, value, record):
if isinstance(value, _BINARY):
return bytes(value)
return False if value is None else value
def compute_value(self, records):
bin_size_name = 'bin_size_' + self.name
if records.env.context.get('bin_size') or records.env.context.get(bin_size_name):
# always compute without bin_size
records_no_bin_size = records.with_context(**{'bin_size': False, bin_size_name: False})
super().compute_value(records_no_bin_size)
# manually update the bin_size cache
cache = records.env.cache
for record_no_bin_size, record in zip(records_no_bin_size, records):
try:
value = cache.get(record_no_bin_size, self)
try:
value = base64.b64decode(value)
except (TypeError, binascii.Error):
pass
try:
if isinstance(value, (bytes, _BINARY)):
value = human_size(len(value))
                    except TypeError:
pass
cache_value = self.convert_to_cache(value, record)
cache.set(record, self, cache_value)
except CacheMiss:
pass
else:
super().compute_value(records)
def read(self, records):
# values are stored in attachments, retrieve them
assert self.attachment
domain = [
('res_model', '=', records._name),
('res_field', '=', self.name),
('res_id', 'in', records.ids),
]
# Note: the 'bin_size' flag is handled by the field 'datas' itself
data = {
att.res_id: att.datas
for att in records.env['ir.attachment'].sudo().search(domain)
}
cache = records.env.cache
for record in records:
cache.set(record, self, data.get(record.id, False))
def create(self, record_values):
assert self.attachment
if not record_values:
return
# create the attachments that store the values
env = record_values[0][0].env
with env.norecompute():
env['ir.attachment'].sudo().with_context(
binary_field_real_user=env.user,
).create([{
'name': self.name,
'res_model': self.model_name,
'res_field': self.name,
'res_id': record.id,
'type': 'binary',
'datas': value,
}
for record, value in record_values
if value
])
def write(self, records, value):
if not self.attachment:
return super().write(records, value)
# discard recomputation of self on records
records.env.remove_to_compute(self, records)
# update the cache, and discard the records that are not modified
cache = records.env.cache
cache_value = self.convert_to_cache(value, records)
records = cache.get_records_different_from(records, self, cache_value)
if not records:
return records
if self.store:
# determine records that are known to be not null
not_null = cache.get_records_different_from(records, self, None)
cache.update(records, self, [cache_value] * len(records))
# retrieve the attachments that store the values, and adapt them
if self.store and any(records._ids):
real_records = records.filtered('id')
atts = records.env['ir.attachment'].sudo()
if not_null:
atts = atts.search([
('res_model', '=', self.model_name),
('res_field', '=', self.name),
('res_id', 'in', real_records.ids),
])
if value:
# update the existing attachments
atts.write({'datas': value})
atts_records = records.browse(atts.mapped('res_id'))
# create the missing attachments
missing = (real_records - atts_records)
if missing:
atts.create([{
'name': self.name,
'res_model': record._name,
'res_field': self.name,
'res_id': record.id,
'type': 'binary',
'datas': value,
}
for record in missing
])
else:
atts.unlink()
return records
class Image(Binary):
"""Encapsulates an image, extending :class:`Binary`.
    If the image size exceeds the ``max_width``/``max_height`` limit in pixels, the image is
    resized to the limit while keeping its aspect ratio.
:param int max_width: the maximum width of the image (default: ``0``, no limit)
:param int max_height: the maximum height of the image (default: ``0``, no limit)
:param bool verify_resolution: whether the image resolution should be verified
to ensure it doesn't go over the maximum image resolution (default: ``True``).
See :class:`odoo.tools.image.ImageProcess` for maximum image resolution (default: ``50e6``).
.. note::
If no ``max_width``/``max_height`` is specified (or is set to 0) and ``verify_resolution`` is False,
the field content won't be verified at all and a :class:`Binary` field should be used.
"""
max_width = 0
max_height = 0
verify_resolution = True
def create(self, record_values):
new_record_values = []
for record, value in record_values:
# strange behavior when setting related image field, when `self`
# does not resize the same way as its related field
new_value = self._image_process(value)
new_record_values.append((record, new_value))
cache_value = self.convert_to_cache(value if self.related else new_value, record)
record.env.cache.update(record, self, [cache_value] * len(record))
super(Image, self).create(new_record_values)
def write(self, records, value):
try:
new_value = self._image_process(value)
except UserError:
if not any(records._ids):
# Some crap is assigned to a new record. This can happen in an
# onchange, where the client sends the "bin size" value of the
# field instead of its full value (this saves bandwidth). In
# this case, we simply don't assign the field: its value will be
# taken from the records' origin.
return
raise
super(Image, self).write(records, new_value)
cache_value = self.convert_to_cache(value if self.related else new_value, records)
records.env.cache.update(records, self, [cache_value] * len(records))
def _image_process(self, value):
return image_process(value,
size=(self.max_width, self.max_height),
verify_resolution=self.verify_resolution,
)
def _process_related(self, value):
"""Override to resize the related value before saving it on self."""
try:
return self._image_process(super()._process_related(value))
except UserError:
# Avoid the following `write` to fail if the related image was saved
# invalid, which can happen for pre-existing databases.
return False
class Selection(Field):
""" Encapsulates an exclusive choice between different values.
:param selection: specifies the possible values for this field.
It is given as either a list of pairs ``(value, label)``, or a model
method, or a method name.
:type selection: list(tuple(str,str)) or callable or str
:param selection_add: provides an extension of the selection in the case
of an overridden field. It is a list of pairs ``(value, label)`` or
singletons ``(value,)``, where singleton values must appear in the
overridden selection. The new values are inserted in an order that is
consistent with the overridden selection and this list::
selection = [('a', 'A'), ('b', 'B')]
selection_add = [('c', 'C'), ('b',)]
> result = [('a', 'A'), ('c', 'C'), ('b', 'B')]
:type selection_add: list(tuple(str,str))
:param ondelete: provides a fallback mechanism for any overridden
field with a selection_add. It is a dict that maps every option
from the selection_add to a fallback action.
This fallback action will be applied to all records whose
selection_add option maps to it.
The actions can be any of the following:
- 'set null' -- the default, all records with this option
will have their selection value set to False.
- 'cascade' -- all records with this option will be
deleted along with the option itself.
- 'set default' -- all records with this option will be
set to the default of the field definition
- 'set VALUE' -- all records with this option will be
set to the given value
- <callable> -- a callable whose first and only argument will be
the set of records containing the specified Selection option,
for custom processing
The attribute ``selection`` is mandatory except in the case of
``related`` or extended fields.
"""
type = 'selection'
column_type = ('varchar', pg_varchar())
selection = None # [(value, string), ...], function or method name
validate = True # whether validating upon write
ondelete = None # {value: policy} (what to do when value is deleted)
def __init__(self, selection=Default, string=Default, **kwargs):
super(Selection, self).__init__(selection=selection, string=string, **kwargs)
def setup_nonrelated(self, model):
super().setup_nonrelated(model)
assert self.selection is not None, "Field %s without selection" % self
def setup_related(self, model):
super().setup_related(model)
# selection must be computed on related field
field = self.related_field
self.selection = lambda model: field._description_selection(model.env)
def _get_attrs(self, model_class, name):
attrs = super()._get_attrs(model_class, name)
# arguments 'selection' and 'selection_add' are processed below
attrs.pop('selection_add', None)
# Selection fields have an optional default implementation of a group_expand function
if attrs.get('group_expand') is True:
attrs['group_expand'] = self._default_group_expand
return attrs
def _setup_attrs(self, model_class, name):
super()._setup_attrs(model_class, name)
if not self._base_fields:
return
# determine selection (applying 'selection_add' extensions)
values = None
labels = {}
for field in self._base_fields:
# We cannot use field.selection or field.selection_add here
# because those attributes are overridden by ``_setup_attrs``.
if 'selection' in field.args:
if self.related:
_logger.warning("%s: selection attribute will be ignored as the field is related", self)
selection = field.args['selection']
if isinstance(selection, list):
if values is not None and values != [kv[0] for kv in selection]:
_logger.warning("%s: selection=%r overrides existing selection; use selection_add instead", self, selection)
values = [kv[0] for kv in selection]
labels = dict(selection)
self.ondelete = {}
else:
values = None
labels = {}
self.selection = selection
self.ondelete = None
if 'selection_add' in field.args:
if self.related:
_logger.warning("%s: selection_add attribute will be ignored as the field is related", self)
selection_add = field.args['selection_add']
assert isinstance(selection_add, list), \
"%s: selection_add=%r must be a list" % (self, selection_add)
assert values is not None, \
"%s: selection_add=%r on non-list selection %r" % (self, selection_add, self.selection)
ondelete = field.args.get('ondelete') or {}
new_values = [kv[0] for kv in selection_add if kv[0] not in values]
for key in new_values:
ondelete.setdefault(key, 'set null')
if self.required and new_values and 'set null' in ondelete.values():
raise ValueError(
"%r: required selection fields must define an ondelete policy that "
"implements the proper cleanup of the corresponding records upon "
"module uninstallation. Please use one or more of the following "
"policies: 'set default' (if the field has a default defined), 'cascade', "
"or a single-argument callable where the argument is the recordset "
"containing the specified option." % self
)
# check ondelete values
for key, val in ondelete.items():
if callable(val) or val in ('set null', 'cascade'):
continue
if val == 'set default':
assert self.default is not None, (
"%r: ondelete policy of type 'set default' is invalid for this field "
"as it does not define a default! Either define one in the base "
"field, or change the chosen ondelete policy" % self
)
elif val.startswith('set '):
assert val[4:] in values, (
"%s: ondelete policy of type 'set %%' must be either 'set null', "
"'set default', or 'set value' where value is a valid selection value."
) % self
else:
raise ValueError(
"%r: ondelete policy %r for selection value %r is not a valid ondelete"
" policy, please choose one of 'set null', 'set default', "
"'set [value]', 'cascade' or a callable" % (self, val, key)
)
values = merge_sequences(values, [kv[0] for kv in selection_add])
labels.update(kv for kv in selection_add if len(kv) == 2)
self.ondelete.update(ondelete)
if values is not None:
self.selection = [(value, labels[value]) for value in values]
if isinstance(self.selection, list):
assert all(isinstance(v, str) for v, _ in self.selection), \
"Field %s with non-str value in selection" % self
def _selection_modules(self, model):
""" Return a mapping from selection values to modules defining each value. """
if not isinstance(self.selection, list):
return {}
value_modules = defaultdict(set)
for field in reversed(resolve_mro(model, self.name, type(self).__instancecheck__)):
module = field._module
if not module:
continue
if 'selection' in field.args:
value_modules.clear()
if isinstance(field.args['selection'], list):
for value, label in field.args['selection']:
value_modules[value].add(module)
if 'selection_add' in field.args:
for value_label in field.args['selection_add']:
if len(value_label) > 1:
value_modules[value_label[0]].add(module)
return value_modules
def _description_selection(self, env):
""" return the selection list (pairs (value, label)); labels are
translated according to context language
"""
selection = self.selection
if isinstance(selection, str) or callable(selection):
return determine(selection, env[self.model_name])
# translate selection labels
if env.lang:
return env['ir.translation'].get_field_selection(self.model_name, self.name)
else:
return selection
def _default_group_expand(self, records, groups, domain, order):
# return a group per selection option, in definition order
return self.get_values(records.env)
def get_values(self, env):
"""Return a list of the possible values."""
selection = self.selection
if isinstance(selection, str) or callable(selection):
selection = determine(selection, env[self.model_name])
return [value for value, _ in selection]
def convert_to_column(self, value, record, values=None, validate=True):
if validate and self.validate:
value = self.convert_to_cache(value, record)
return super(Selection, self).convert_to_column(value, record, values, validate)
def convert_to_cache(self, value, record, validate=True):
if not validate:
return value or None
if value and self.column_type[0] == 'int4':
value = int(value)
if value in self.get_values(record.env):
return value
elif not value:
return None
raise ValueError("Wrong value for %s: %r" % (self, value))
def convert_to_export(self, value, record):
if not isinstance(self.selection, list):
# FIXME: this reproduces an existing buggy behavior!
return value if value else ''
for item in self._description_selection(record.env):
if item[0] == value:
return item[1]
return ''
class Reference(Selection):
""" Pseudo-relational field (no FK in database).
The field value is stored as a :class:`string <str>` following the pattern
``"res_model,res_id"`` in database.
"""
type = 'reference'
@property
def column_type(self):
return ('varchar', pg_varchar())
def convert_to_column(self, value, record, values=None, validate=True):
return Field.convert_to_column(self, value, record, values, validate)
def convert_to_cache(self, value, record, validate=True):
# cache format: str ("model,id") or None
if isinstance(value, BaseModel):
if not validate or (value._name in self.get_values(record.env) and len(value) <= 1):
return "%s,%s" % (value._name, value.id) if value else None
elif isinstance(value, str):
res_model, res_id = value.split(',')
if not validate or res_model in self.get_values(record.env):
if record.env[res_model].browse(int(res_id)).exists():
return value
else:
return None
elif not value:
return None
raise ValueError("Wrong value for %s: %r" % (self, value))
def convert_to_record(self, value, record):
if value:
res_model, res_id = value.split(',')
return record.env[res_model].browse(int(res_id))
return None
def convert_to_read(self, value, record, use_name_get=True):
return "%s,%s" % (value._name, value.id) if value else False
def convert_to_export(self, value, record):
return value.display_name if value else ''
def convert_to_display_name(self, value, record):
return ustr(value and value.display_name)
class _Relational(Field):
""" Abstract class for relational fields. """
relational = True
domain = [] # domain for searching values
context = {} # context for searching values
check_company = False
def __get__(self, records, owner):
# base case: do the regular access
if records is None or len(records._ids) <= 1:
return super().__get__(records, owner)
# multirecord case: use mapped
return self.mapped(records)
def setup_nonrelated(self, model):
super().setup_nonrelated(model)
if self.comodel_name not in model.pool:
_logger.warning("Field %s with unknown comodel_name %r", self, self.comodel_name)
self.comodel_name = '_unknown'
def get_domain_list(self, model):
""" Return a list domain from the domain parameter. """
domain = self.domain
if callable(domain):
domain = domain(model)
return domain if isinstance(domain, list) else []
@property
def _related_domain(self):
if callable(self.domain):
# will be called with another model than self's
return lambda recs: self.domain(recs.env[self.model_name])
else:
# maybe not correct if domain is a string...
return self.domain
_related_context = property(attrgetter('context'))
_description_relation = property(attrgetter('comodel_name'))
_description_context = property(attrgetter('context'))
def _description_domain(self, env):
if self.check_company and not self.domain:
if self.company_dependent:
if self.comodel_name == "res.users":
# user needs access to current company (self.env.company)
return "[('company_ids', 'in', allowed_company_ids[0])]"
else:
return "[('company_id', 'in', [allowed_company_ids[0], False])]"
else:
# when using check_company=True on a field on 'res.company', the
# company_id comes from the id of the current record
cid = "id" if self.model_name == "res.company" else "company_id"
if self.comodel_name == "res.users":
# User allowed company ids = user.company_ids
return f"['|', (not {cid}, '=', True), ('company_ids', 'in', [{cid}])]"
else:
return f"[('company_id', 'in', [{cid}, False])]"
return self.domain(env[self.model_name]) if callable(self.domain) else self.domain
def null(self, record):
return record.env[self.comodel_name]
class Many2one(_Relational):
""" The value of such a field is a recordset of size 0 (no
record) or 1 (a single record).
:param str comodel_name: name of the target model
``Mandatory`` except for related or extended fields.
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param dict context: an optional context to use on the client side when
handling that field
:param str ondelete: what to do when the referred record is deleted;
possible values are: ``'set null'``, ``'restrict'``, ``'cascade'``
:param bool auto_join: whether JOINs are generated upon search through that
field (default: ``False``)
:param bool delegate: set it to ``True`` to make fields of the target model
accessible from the current model (corresponds to ``_inherits``)
:param bool check_company: Mark the field to be verified in
:meth:`~odoo.models.Model._check_company`. Add a default company
domain depending on the field attributes.
"""
type = 'many2one'
column_type = ('int4', 'int4')
ondelete = None # what to do when value is deleted
auto_join = False # whether joins are generated upon search
delegate = False # whether self implements delegation
def __init__(self, comodel_name=Default, string=Default, **kwargs):
super(Many2one, self).__init__(comodel_name=comodel_name, string=string, **kwargs)
def _setup_attrs(self, model_class, name):
super()._setup_attrs(model_class, name)
# determine self.delegate
if not self.delegate and name in model_class._inherits.values():
self.delegate = True
# self.delegate implies self.auto_join
if self.delegate:
self.auto_join = True
def setup_nonrelated(self, model):
super().setup_nonrelated(model)
# 3 cases:
# 1) The ondelete attribute is not defined, we assign it a sensible default
# 2) The ondelete attribute is defined and its definition makes sense
# 3) The ondelete attribute is explicitly defined as 'set null' for a required m2o,
# this is considered a programming error.
if not self.ondelete:
comodel = model.env[self.comodel_name]
if model.is_transient() and not comodel.is_transient():
                # Many2one relations from a TransientModel to a regular Model are
                # annoying because they can block deletion due to foreign keys. So
                # unless stated otherwise, we default them to ondelete='cascade'.
self.ondelete = 'cascade' if self.required else 'set null'
else:
self.ondelete = 'restrict' if self.required else 'set null'
if self.ondelete == 'set null' and self.required:
raise ValueError(
"The m2o field %s of model %s is required but declares its ondelete policy "
"as being 'set null'. Only 'restrict' and 'cascade' make sense."
% (self.name, model._name)
)
if self.ondelete == 'restrict' and self.comodel_name in IR_MODELS:
raise ValueError(
f"Field {self.name} of model {model._name} is defined as ondelete='restrict' "
f"while having {self.comodel_name} as comodel, the 'restrict' mode is not "
f"supported for this type of field as comodel."
)
def update_db(self, model, columns):
comodel = model.env[self.comodel_name]
if not model.is_transient() and comodel.is_transient():
raise ValueError('Many2one %s from Model to TransientModel is forbidden' % self)
return super(Many2one, self).update_db(model, columns)
def update_db_column(self, model, column):
super(Many2one, self).update_db_column(model, column)
model.pool.post_init(self.update_db_foreign_key, model, column)
def update_db_foreign_key(self, model, column):
comodel = model.env[self.comodel_name]
# foreign keys do not work on views, and users can define custom models on sql views.
if not model._is_an_ordinary_table() or not comodel._is_an_ordinary_table():
return
# ir_actions is inherited, so foreign key doesn't work on it
if not comodel._auto or comodel._table == 'ir_actions':
return
# create/update the foreign key, and reflect it in 'ir.model.constraint'
model.pool.add_foreign_key(
model._table, self.name, comodel._table, 'id', self.ondelete or 'set null',
model, self._module
)
def _update(self, records, value):
""" Update the cached value of ``self`` for ``records`` with ``value``. """
cache = records.env.cache
for record in records:
cache.set(record, self, self.convert_to_cache(value, record, validate=False))
def convert_to_column(self, value, record, values=None, validate=True):
return value or None
def convert_to_cache(self, value, record, validate=True):
# cache format: id or None
if type(value) in IdType:
id_ = value
elif isinstance(value, BaseModel):
if validate and (value._name != self.comodel_name or len(value) > 1):
raise ValueError("Wrong value for %s: %r" % (self, value))
id_ = value._ids[0] if value._ids else None
elif isinstance(value, tuple):
# value is either a pair (id, name), or a tuple of ids
id_ = value[0] if value else None
elif isinstance(value, dict):
# return a new record (with the given field 'id' as origin)
comodel = record.env[self.comodel_name]
origin = comodel.browse(value.get('id'))
id_ = comodel.new(value, origin=origin).id
else:
id_ = None
if self.delegate and record and not any(record._ids):
# if all records are new, then so is the parent
id_ = id_ and NewId(id_)
return id_
def convert_to_record(self, value, record):
# use registry to avoid creating a recordset for the model
ids = () if value is None else (value,)
prefetch_ids = IterableGenerator(prefetch_many2one_ids, record, self)
return record.pool[self.comodel_name]._browse(record.env, ids, prefetch_ids)
def convert_to_record_multi(self, values, records):
# return the ids as a recordset without duplicates
prefetch_ids = IterableGenerator(prefetch_many2one_ids, records, self)
ids = tuple(unique(id_ for id_ in values if id_ is not None))
return records.pool[self.comodel_name]._browse(records.env, ids, prefetch_ids)
def convert_to_read(self, value, record, use_name_get=True):
if use_name_get and value:
# evaluate name_get() as superuser, because the visibility of a
# many2one field value (id and name) depends on the current record's
# access rights, and not the value's access rights.
try:
# performance: value.sudo() prefetches the same records as value
return (value.id, value.sudo().display_name)
except MissingError:
# Should not happen, unless the foreign key is missing.
return False
else:
return value.id
def convert_to_write(self, value, record):
if type(value) in IdType:
return value
if not value:
return False
if isinstance(value, BaseModel) and value._name == self.comodel_name:
return value.id
if isinstance(value, tuple):
# value is either a pair (id, name), or a tuple of ids
return value[0] if value else False
if isinstance(value, dict):
return record.env[self.comodel_name].new(value).id
raise ValueError("Wrong value for %s: %r" % (self, value))
def convert_to_export(self, value, record):
return value.display_name if value else ''
def convert_to_display_name(self, value, record):
return ustr(value.display_name)
def convert_to_onchange(self, value, record, names):
# if value is a new record, serialize its origin instead
return super().convert_to_onchange(value._origin, record, names)
def write(self, records, value):
# discard recomputation of self on records
records.env.remove_to_compute(self, records)
# discard the records that are not modified
cache = records.env.cache
cache_value = self.convert_to_cache(value, records)
records = cache.get_records_different_from(records, self, cache_value)
if not records:
return records
# remove records from the cache of one2many fields of old corecords
self._remove_inverses(records, cache_value)
# update the cache of self
cache.update(records, self, [cache_value] * len(records))
# update towrite
if self.store:
towrite = records.env.all.towrite[self.model_name]
for record in records.filtered('id'):
# cache_value is already in database format
towrite[record.id][self.name] = cache_value
# update the cache of one2many fields of new corecord
self._update_inverses(records, cache_value)
return records
def _remove_inverses(self, records, value):
""" Remove `records` from the cached values of the inverse fields of `self`. """
cache = records.env.cache
record_ids = set(records._ids)
# align(id) returns a NewId if records are new, a real id otherwise
align = (lambda id_: id_) if all(record_ids) else (lambda id_: id_ and NewId(id_))
for invf in records.pool.field_inverses[self]:
corecords = records.env[self.comodel_name].browse(
align(id_) for id_ in cache.get_values(records, self)
)
for corecord in corecords:
ids0 = cache.get(corecord, invf, None)
if ids0 is not None:
ids1 = tuple(id_ for id_ in ids0 if id_ not in record_ids)
cache.set(corecord, invf, ids1)
def _update_inverses(self, records, value):
""" Add `records` to the cached values of the inverse fields of `self`. """
if value is None:
return
cache = records.env.cache
corecord = self.convert_to_record(value, records)
for invf in records.pool.field_inverses[self]:
valid_records = records.filtered_domain(invf.get_domain_list(corecord))
if not valid_records:
continue
ids0 = cache.get(corecord, invf, None)
# if the value for the corecord is not in cache, but this is a new
# record, assign it anyway, as you won't be able to fetch it from
# database (see `test_sale_order`)
if ids0 is not None or not corecord.id:
ids1 = tuple(unique((ids0 or ()) + valid_records._ids))
cache.set(corecord, invf, ids1)
class Many2oneReference(Integer):
""" Pseudo-relational field (no FK in database).
The field value is stored as an :class:`integer <int>` id in database.
Contrary to :class:`Reference` fields, the model has to be specified
in a :class:`Char` field, whose name has to be specified in the
`model_field` attribute for the current :class:`Many2oneReference` field.
:param str model_field: name of the :class:`Char` where the model name is stored.
"""
type = 'many2one_reference'
model_field = None
_related_model_field = property(attrgetter('model_field'))
def convert_to_cache(self, value, record, validate=True):
# cache format: id or None
if isinstance(value, BaseModel):
value = value._ids[0] if value._ids else None
return super().convert_to_cache(value, record, validate)
def _remove_inverses(self, records, value):
# TODO: unused
# remove records from the cache of one2many fields of old corecords
cache = records.env.cache
record_ids = set(records._ids)
model_ids = self._record_ids_per_res_model(records)
for invf in records.pool.field_inverses[self]:
records = records.browse(model_ids[invf.model_name])
if not records:
continue
corecords = records.env[invf.model_name].browse(
id_ for id_ in cache.get_values(records, self)
)
for corecord in corecords:
ids0 = cache.get(corecord, invf, None)
if ids0 is not None:
ids1 = tuple(id_ for id_ in ids0 if id_ not in record_ids)
cache.set(corecord, invf, ids1)
def _update_inverses(self, records, value):
""" Add `records` to the cached values of the inverse fields of `self`. """
if not value:
return
cache = records.env.cache
model_ids = self._record_ids_per_res_model(records)
for invf in records.pool.field_inverses[self]:
records = records.browse(model_ids[invf.model_name])
if not records:
continue
corecord = records.env[invf.model_name].browse(value)
records = records.filtered_domain(invf.get_domain_list(corecord))
if not records:
continue
ids0 = cache.get(corecord, invf, None)
# if the value for the corecord is not in cache, but this is a new
# record, assign it anyway, as you won't be able to fetch it from
# database (see `test_sale_order`)
if ids0 is not None or not corecord.id:
ids1 = tuple(unique((ids0 or ()) + records._ids))
cache.set(corecord, invf, ids1)
def _record_ids_per_res_model(self, records):
model_ids = defaultdict(set)
for record in records:
model = record[self.model_field]
if not model and record._fields[self.model_field].compute:
# fallback when the model field is computed :-/
record._fields[self.model_field].compute_value(record)
model = record[self.model_field]
if not model:
continue
model_ids[model].add(record.id)
return model_ids
class Command(enum.IntEnum):
"""
:class:`~odoo.fields.One2many` and :class:`~odoo.fields.Many2many` fields
expect a special command to manipulate the relation they implement.
    Internally, each command is a 3-element tuple where the first element is a
    mandatory integer that identifies the command, the second element is either
    the related record id to apply the command on (commands update, delete,
    unlink and link) or 0 (commands create, clear and set), and the third
    element is either the ``values`` to write on the record (commands create
    and update), the new ``ids`` list of related records (command set),
    or 0 (commands delete, unlink, link, and clear).
    Via Python, we encourage developers to craft new commands via the various
    functions of this namespace. We also encourage developers to use the
    command identifier constant names when comparing the 1st element of
    existing commands.
    Via RPC, it is not possible to use either the functions or the command
    constant names. The literal 3-element tuple must be written instead, where
    the first element is the integer identifier of the command.
"""
CREATE = 0
UPDATE = 1
DELETE = 2
UNLINK = 3
LINK = 4
CLEAR = 5
SET = 6
@classmethod
def create(cls, values: dict):
"""
Create new records in the comodel using ``values``, link the created
records to ``self``.
In case of a :class:`~odoo.fields.Many2many` relation, one unique
new record is created in the comodel such that all records in `self`
are linked to the new record.
In case of a :class:`~odoo.fields.One2many` relation, one new record
is created in the comodel for every record in ``self`` such that every
record in ``self`` is linked to exactly one of the new records.
Return the command triple :samp:`(CREATE, 0, {values})`
"""
return (cls.CREATE, 0, values)
@classmethod
def update(cls, id: int, values: dict):
"""
Write ``values`` on the related record.
Return the command triple :samp:`(UPDATE, {id}, {values})`
"""
return (cls.UPDATE, id, values)
@classmethod
def delete(cls, id: int):
"""
Remove the related record from the database and remove its relation
with ``self``.
In case of a :class:`~odoo.fields.Many2many` relation, removing the
record from the database may be prevented if it is still linked to
other records.
Return the command triple :samp:`(DELETE, {id}, 0)`
"""
return (cls.DELETE, id, 0)
@classmethod
def unlink(cls, id: int):
"""
Remove the relation between ``self`` and the related record.
In case of a :class:`~odoo.fields.One2many` relation, the given record
is deleted from the database if the inverse field is set as
``ondelete='cascade'``. Otherwise, the value of the inverse field is
set to False and the record is kept.
Return the command triple :samp:`(UNLINK, {id}, 0)`
"""
return (cls.UNLINK, id, 0)
@classmethod
def link(cls, id: int):
"""
Add a relation between ``self`` and the related record.
Return the command triple :samp:`(LINK, {id}, 0)`
"""
return (cls.LINK, id, 0)
@classmethod
def clear(cls):
"""
Remove all records from the relation with ``self``. It behaves like
executing the `unlink` command on every record.
Return the command triple :samp:`(CLEAR, 0, 0)`
"""
return (cls.CLEAR, 0, 0)
@classmethod
def set(cls, ids: list):
"""
Replace the current relations of ``self`` by the given ones. It behaves
like executing the ``unlink`` command on every removed relation then
executing the ``link`` command on every new relation.
Return the command triple :samp:`(SET, 0, {ids})`
"""
return (cls.SET, 0, ids)
class _RelationalMulti(_Relational):
""" Abstract class for relational fields *2many. """
write_sequence = 20
# Important: the cache contains the ids of all the records in the relation,
# including inactive records. Inactive records are filtered out by
# convert_to_record(), depending on the context.
def _update(self, records, value):
""" Update the cached value of ``self`` for ``records`` with ``value``,
and return whether everything is in cache.
"""
if not isinstance(records, BaseModel):
# the inverse of self is a non-relational field; `value` is a
# corecord that refers to `records` by an integer field
model = value.env[self.model_name]
domain = self.domain(model) if callable(self.domain) else self.domain
if not value.filtered_domain(domain):
return
records = model.browse(records)
result = True
if value:
cache = records.env.cache
for record in records:
if cache.contains(record, self):
val = self.convert_to_cache(record[self.name] | value, record, validate=False)
cache.set(record, self, val)
else:
result = False
records.modified([self.name])
return result
def convert_to_cache(self, value, record, validate=True):
# cache format: tuple(ids)
if isinstance(value, BaseModel):
if validate and value._name != self.comodel_name:
raise ValueError("Wrong value for %s: %s" % (self, value))
ids = value._ids
if record and not record.id:
# x2many field value of new record is new records
ids = tuple(it and NewId(it) for it in ids)
return ids
elif isinstance(value, (list, tuple)):
# value is a list/tuple of commands, dicts or record ids
comodel = record.env[self.comodel_name]
# if record is new, the field's value is new records
if record and not record.id:
browse = lambda it: comodel.browse([it and NewId(it)])
else:
browse = comodel.browse
# determine the value ids
ids = OrderedSet(record[self.name]._ids if validate else ())
# modify ids with the commands
for command in value:
if isinstance(command, (tuple, list)):
if command[0] == Command.CREATE:
ids.add(comodel.new(command[2], ref=command[1]).id)
elif command[0] == Command.UPDATE:
line = browse(command[1])
if validate:
line.update(command[2])
else:
line._update_cache(command[2], validate=False)
ids.add(line.id)
elif command[0] in (Command.DELETE, Command.UNLINK):
ids.discard(browse(command[1]).id)
elif command[0] == Command.LINK:
ids.add(browse(command[1]).id)
elif command[0] == Command.CLEAR:
ids.clear()
elif command[0] == Command.SET:
ids = OrderedSet(browse(it).id for it in command[2])
elif isinstance(command, dict):
ids.add(comodel.new(command).id)
else:
ids.add(browse(command).id)
# return result as a tuple
return tuple(ids)
elif not value:
return ()
raise ValueError("Wrong value for %s: %s" % (self, value))
def convert_to_record(self, value, record):
# use registry to avoid creating a recordset for the model
prefetch_ids = IterableGenerator(prefetch_x2many_ids, record, self)
Comodel = record.pool[self.comodel_name]
corecords = Comodel._browse(record.env, value, prefetch_ids)
if (
Comodel._active_name
and self.context.get('active_test', record.env.context.get('active_test', True))
):
corecords = corecords.filtered(Comodel._active_name).with_prefetch(prefetch_ids)
return corecords
def convert_to_record_multi(self, values, records):
# return the list of ids as a recordset without duplicates
prefetch_ids = IterableGenerator(prefetch_x2many_ids, records, self)
Comodel = records.pool[self.comodel_name]
ids = tuple(unique(id_ for ids in values for id_ in ids))
corecords = Comodel._browse(records.env, ids, prefetch_ids)
if (
Comodel._active_name
and self.context.get('active_test', records.env.context.get('active_test', True))
):
corecords = corecords.filtered(Comodel._active_name).with_prefetch(prefetch_ids)
return corecords
def convert_to_read(self, value, record, use_name_get=True):
return value.ids
def convert_to_write(self, value, record):
if isinstance(value, tuple):
# a tuple of ids, this is the cache format
value = record.env[self.comodel_name].browse(value)
if isinstance(value, BaseModel) and value._name == self.comodel_name:
def get_origin(val):
return val._origin if isinstance(val, BaseModel) else val
# make result with new and existing records
inv_names = {field.name for field in record.pool.field_inverses[self]}
result = [Command.set([])]
for record in value:
origin = record._origin
if not origin:
values = record._convert_to_write({
name: record[name]
for name in record._cache
if name not in inv_names
})
result.append(Command.create(values))
else:
result[0][2].append(origin.id)
if record != origin:
values = record._convert_to_write({
name: record[name]
for name in record._cache
if name not in inv_names and get_origin(record[name]) != origin[name]
})
if values:
result.append(Command.update(origin.id, values))
return result
if value is False or value is None:
return [Command.clear()]
if isinstance(value, list):
return value
raise ValueError("Wrong value for %s: %s" % (self, value))
def convert_to_export(self, value, record):
return ','.join(name for id, name in value.name_get()) if value else ''
def convert_to_display_name(self, value, record):
raise NotImplementedError()
def get_depends(self, model):
depends, depends_context = super().get_depends(model)
if not self.compute and isinstance(self.domain, list):
depends = unique(itertools.chain(depends, (
self.name + '.' + arg[0]
for arg in self.domain
if isinstance(arg, (tuple, list)) and isinstance(arg[0], str)
)))
return depends, depends_context
def create(self, record_values):
""" Write the value of ``self`` on the given records, which have just
been created.
:param record_values: a list of pairs ``(record, value)``, where
``value`` is in the format of method :meth:`BaseModel.write`
"""
self.write_batch(record_values, True)
def write(self, records, value):
# discard recomputation of self on records
records.env.remove_to_compute(self, records)
return self.write_batch([(records, value)])
def write_batch(self, records_commands_list, create=False):
if not records_commands_list:
return False
for idx, (recs, value) in enumerate(records_commands_list):
if isinstance(value, tuple):
value = [Command.set(value)]
elif isinstance(value, BaseModel) and value._name == self.comodel_name:
value = [Command.set(value._ids)]
elif value is False or value is None:
value = [Command.clear()]
elif isinstance(value, list) and value and not isinstance(value[0], (tuple, list)):
value = [Command.set(tuple(value))]
if not isinstance(value, list):
raise ValueError("Wrong value for %s: %s" % (self, value))
records_commands_list[idx] = (recs, value)
record_ids = {rid for recs, cs in records_commands_list for rid in recs._ids}
if all(record_ids):
return self.write_real(records_commands_list, create)
else:
assert not any(record_ids)
return self.write_new(records_commands_list)
class One2many(_RelationalMulti):
"""One2many field; the value of such a field is the recordset of all the
records in ``comodel_name`` such that the field ``inverse_name`` is equal to
the current record.
:param str comodel_name: name of the target model
:param str inverse_name: name of the inverse ``Many2one`` field in
``comodel_name``
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param dict context: an optional context to use on the client side when
handling that field
:param bool auto_join: whether JOINs are generated upon search through that
field (default: ``False``)
:param int limit: optional limit to use upon read
The attributes ``comodel_name`` and ``inverse_name`` are mandatory except in
the case of related fields or field extensions.
"""
type = 'one2many'
inverse_name = None # name of the inverse field
auto_join = False # whether joins are generated upon search
limit = None # optional limit to use upon read
copy = False # o2m are not copied by default
def __init__(self, comodel_name=Default, inverse_name=Default, string=Default, **kwargs):
super(One2many, self).__init__(
comodel_name=comodel_name,
inverse_name=inverse_name,
string=string,
**kwargs
)
def setup_nonrelated(self, model):
super(One2many, self).setup_nonrelated(model)
if self.inverse_name:
# link self to its inverse field and vice-versa
comodel = model.env[self.comodel_name]
invf = comodel._fields[self.inverse_name]
if isinstance(invf, (Many2one, Many2oneReference)):
# setting one2many fields only invalidates many2one inverses;
# integer inverses (res_model/res_id pairs) are not supported
model.pool.field_inverses.add(self, invf)
comodel.pool.field_inverses.add(invf, self)
_description_relation_field = property(attrgetter('inverse_name'))
def update_db(self, model, columns):
if self.comodel_name in model.env:
comodel = model.env[self.comodel_name]
if self.inverse_name not in comodel._fields:
raise UserError(_("No inverse field %r found for %r") % (self.inverse_name, self.comodel_name))
def get_domain_list(self, records):
comodel = records.env.registry[self.comodel_name]
inverse_field = comodel._fields[self.inverse_name]
domain = super(One2many, self).get_domain_list(records)
if inverse_field.type == 'many2one_reference':
domain = domain + [(inverse_field.model_field, '=', records._name)]
return domain
def __get__(self, records, owner):
if records is not None and self.inverse_name is not None:
# force the computation of the inverse field to ensure that the
# cache value of self is consistent
inverse_field = records.pool[self.comodel_name]._fields[self.inverse_name]
if inverse_field.compute:
records.env[self.comodel_name].recompute([self.inverse_name])
return super().__get__(records, owner)
def read(self, records):
# retrieve the lines in the comodel
context = {'active_test': False}
context.update(self.context)
comodel = records.env[self.comodel_name].with_context(**context)
inverse = self.inverse_name
inverse_field = comodel._fields[inverse]
get_id = (lambda rec: rec.id) if inverse_field.type == 'many2one' else int
domain = self.get_domain_list(records) + [(inverse, 'in', records.ids)]
lines = comodel.search(domain, limit=self.limit)
# group lines by inverse field (without prefetching other fields)
group = defaultdict(list)
for line in lines.with_context(prefetch_fields=False):
# line[inverse] may be a record or an integer
group[get_id(line[inverse])].append(line.id)
# store result in cache
cache = records.env.cache
for record in records:
cache.set(record, self, tuple(group[record.id]))
def write_real(self, records_commands_list, create=False):
""" Update real records. """
# records_commands_list = [(records, commands), ...]
if not records_commands_list:
return
model = records_commands_list[0][0].browse()
comodel = model.env[self.comodel_name].with_context(**self.context)
ids = {rid for recs, cs in records_commands_list for rid in recs.ids}
records = records_commands_list[0][0].browse(ids)
if self.store:
inverse = self.inverse_name
to_create = [] # line vals to create
to_delete = [] # line ids to delete
to_link = defaultdict(set) # {record: line_ids}
allow_full_delete = not create
def unlink(lines):
if getattr(comodel._fields[inverse], 'ondelete', False) == 'cascade':
to_delete.extend(lines._ids)
else:
lines[inverse] = False
def flush():
if to_link:
before = {record: record[self.name] for record in to_link}
if to_delete:
# unlink() will remove the lines from the cache
comodel.browse(to_delete).unlink()
to_delete.clear()
if to_create:
# create() will add the new lines to the cache of records
comodel.create(to_create)
to_create.clear()
if to_link:
for record, line_ids in to_link.items():
lines = comodel.browse(line_ids) - before[record]
# linking missing lines should fail
lines.mapped(inverse)
lines[inverse] = record
to_link.clear()
for recs, commands in records_commands_list:
for command in (commands or ()):
if command[0] == Command.CREATE:
for record in recs:
to_create.append(dict(command[2], **{inverse: record.id}))
allow_full_delete = False
elif command[0] == Command.UPDATE:
comodel.browse(command[1]).write(command[2])
elif command[0] == Command.DELETE:
to_delete.append(command[1])
elif command[0] == Command.UNLINK:
unlink(comodel.browse(command[1]))
elif command[0] == Command.LINK:
to_link[recs[-1]].add(command[1])
allow_full_delete = False
elif command[0] in (Command.CLEAR, Command.SET):
# do not try to delete anything in creation mode if nothing has been created before
line_ids = command[2] if command[0] == Command.SET else []
if not allow_full_delete and not line_ids:
continue
flush()
# assign the given lines to the last record only
lines = comodel.browse(line_ids)
domain = self.get_domain_list(model) + \
[(inverse, 'in', recs.ids), ('id', 'not in', lines.ids)]
unlink(comodel.search(domain))
lines[inverse] = recs[-1]
flush()
else:
cache = records.env.cache
def link(record, lines):
ids = record[self.name]._ids
cache.set(record, self, tuple(unique(ids + lines._ids)))
def unlink(lines):
for record in records:
cache.set(record, self, (record[self.name] - lines)._ids)
for recs, commands in records_commands_list:
for command in (commands or ()):
if command[0] == Command.CREATE:
for record in recs:
link(record, comodel.new(command[2], ref=command[1]))
elif command[0] == Command.UPDATE:
comodel.browse(command[1]).write(command[2])
elif command[0] == Command.DELETE:
unlink(comodel.browse(command[1]))
elif command[0] == Command.UNLINK:
unlink(comodel.browse(command[1]))
elif command[0] == Command.LINK:
link(recs[-1], comodel.browse(command[1]))
elif command[0] in (Command.CLEAR, Command.SET):
# assign the given lines to the last record only
cache.update(recs, self, [()] * len(recs))
lines = comodel.browse(command[2] if command[0] == Command.SET else [])
cache.set(recs[-1], self, lines._ids)
return records
def write_new(self, records_commands_list):
if not records_commands_list:
return
model = records_commands_list[0][0].browse()
cache = model.env.cache
comodel = model.env[self.comodel_name].with_context(**self.context)
ids = {record.id for records, _ in records_commands_list for record in records}
records = model.browse(ids)
def browse(ids):
return comodel.browse([id_ and NewId(id_) for id_ in ids])
# make sure self is in cache
records[self.name]
if self.store:
inverse = self.inverse_name
# make sure self's inverse is in cache
inverse_field = comodel._fields[inverse]
for record in records:
cache.update(record[self.name], inverse_field, itertools.repeat(record.id))
for recs, commands in records_commands_list:
for command in commands:
if command[0] == Command.CREATE:
for record in recs:
line = comodel.new(command[2], ref=command[1])
line[inverse] = record
elif command[0] == Command.UPDATE:
browse([command[1]]).update(command[2])
elif command[0] == Command.DELETE:
browse([command[1]])[inverse] = False
elif command[0] == Command.UNLINK:
browse([command[1]])[inverse] = False
elif command[0] == Command.LINK:
browse([command[1]])[inverse] = recs[-1]
elif command[0] == Command.CLEAR:
cache.update(recs, self, itertools.repeat(()))
elif command[0] == Command.SET:
# assign the given lines to the last record only
cache.update(recs, self, itertools.repeat(()))
last, lines = recs[-1], browse(command[2])
cache.set(last, self, lines._ids)
cache.update(lines, inverse_field, itertools.repeat(last.id))
else:
def link(record, lines):
ids = record[self.name]._ids
cache.set(record, self, tuple(unique(ids + lines._ids)))
def unlink(lines):
for record in records:
cache.set(record, self, (record[self.name] - lines)._ids)
for recs, commands in records_commands_list:
for command in commands:
if command[0] == Command.CREATE:
for record in recs:
link(record, comodel.new(command[2], ref=command[1]))
elif command[0] == Command.UPDATE:
browse([command[1]]).update(command[2])
elif command[0] == Command.DELETE:
unlink(browse([command[1]]))
elif command[0] == Command.UNLINK:
unlink(browse([command[1]]))
elif command[0] == Command.LINK:
link(recs[-1], browse([command[1]]))
elif command[0] in (Command.CLEAR, Command.SET):
# assign the given lines to the last record only
cache.update(recs, self, [()] * len(recs))
lines = comodel.browse(command[2] if command[0] == Command.SET else [])
cache.set(recs[-1], self, lines._ids)
return records
class Many2many(_RelationalMulti):
""" Many2many field; the value of such a field is the recordset.
:param comodel_name: name of the target model (string)
mandatory except in the case of related or extended fields
:param str relation: optional name of the table that stores the relation in
the database
:param str column1: optional name of the column referring to "these" records
in the table ``relation``
:param str column2: optional name of the column referring to "those" records
in the table ``relation``
The attributes ``relation``, ``column1`` and ``column2`` are optional.
If not given, names are automatically generated from model names,
provided ``model_name`` and ``comodel_name`` are different!
    Note that having several fields with implicit relation parameters on a
    given model with the same comodel is not accepted by the ORM, since
    those fields would use the same table. The ORM prevents two many2many
    fields from using the same relation parameters, except if
- both fields use the same model, comodel, and relation parameters are
explicit; or
- at least one field belongs to a model with ``_auto = False``.
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param dict context: an optional context to use on the client side when
handling that field
:param bool check_company: Mark the field to be verified in
:meth:`~odoo.models.Model._check_company`. Add a default company
domain depending on the field attributes.
:param int limit: optional limit to use upon read
"""
type = 'many2many'
_explicit = True # whether schema is explicitly given
relation = None # name of table
column1 = None # column of table referring to model
column2 = None # column of table referring to comodel
auto_join = False # whether joins are generated upon search
limit = None # optional limit to use upon read
ondelete = 'cascade' # optional ondelete for the column2 fkey
def __init__(self, comodel_name=Default, relation=Default, column1=Default,
column2=Default, string=Default, **kwargs):
super(Many2many, self).__init__(
comodel_name=comodel_name,
relation=relation,
column1=column1,
column2=column2,
string=string,
**kwargs
)
def setup_nonrelated(self, model):
super().setup_nonrelated(model)
# 2 cases:
# 1) The ondelete attribute is defined and its definition makes sense
# 2) The ondelete attribute is explicitly defined as 'set null' for a m2m,
# this is considered a programming error.
if self.ondelete not in ('cascade', 'restrict'):
raise ValueError(
"The m2m field %s of model %s declares its ondelete policy "
"as being %r. Only 'restrict' and 'cascade' make sense."
% (self.name, model._name, self.ondelete)
)
if self.store:
if not (self.relation and self.column1 and self.column2):
if not self.relation:
self._explicit = False
# table name is based on the stable alphabetical order of tables
comodel = model.env[self.comodel_name]
if not self.relation:
tables = sorted([model._table, comodel._table])
assert tables[0] != tables[1], \
"%s: Implicit/canonical naming of many2many relationship " \
"table is not possible when source and destination models " \
"are the same" % self
self.relation = '%s_%s_rel' % tuple(tables)
if not self.column1:
self.column1 = '%s_id' % model._table
if not self.column2:
self.column2 = '%s_id' % comodel._table
# check validity of table name
check_pg_name(self.relation)
else:
self.relation = self.column1 = self.column2 = None
if self.relation:
m2m = model.pool._m2m
# check whether other fields use the same schema
fields = m2m[(self.relation, self.column1, self.column2)]
for field in fields:
if ( # same model: relation parameters must be explicit
self.model_name == field.model_name and
self.comodel_name == field.comodel_name and
self._explicit and field._explicit
) or ( # different models: one model must be _auto=False
self.model_name != field.model_name and
not (model._auto and model.env[field.model_name]._auto)
):
continue
msg = "Many2many fields %s and %s use the same table and columns"
raise TypeError(msg % (self, field))
fields.append(self)
# retrieve inverse fields, and link them in field_inverses
for field in m2m[(self.relation, self.column2, self.column1)]:
model.pool.field_inverses.add(self, field)
model.pool.field_inverses.add(field, self)
def update_db(self, model, columns):
cr = model._cr
# Do not reflect relations for custom fields, as they do not belong to a
# module. They are automatically removed when dropping the corresponding
# 'ir.model.field'.
if not self.manual:
model.pool.post_init(model.env['ir.model.relation']._reflect_relation,
model, self.relation, self._module)
comodel = model.env[self.comodel_name]
if not sql.table_exists(cr, self.relation):
query = """
CREATE TABLE "{rel}" ("{id1}" INTEGER NOT NULL,
"{id2}" INTEGER NOT NULL,
PRIMARY KEY("{id1}","{id2}"));
COMMENT ON TABLE "{rel}" IS %s;
CREATE INDEX ON "{rel}" ("{id2}","{id1}");
""".format(rel=self.relation, id1=self.column1, id2=self.column2)
cr.execute(query, ['RELATION BETWEEN %s AND %s' % (model._table, comodel._table)])
_schema.debug("Create table %r: m2m relation between %r and %r", self.relation, model._table, comodel._table)
model.pool.post_init(self.update_db_foreign_keys, model)
return True
model.pool.post_init(self.update_db_foreign_keys, model)
def update_db_foreign_keys(self, model):
""" Add the foreign keys corresponding to the field's relation table. """
comodel = model.env[self.comodel_name]
if model._is_an_ordinary_table():
model.pool.add_foreign_key(
self.relation, self.column1, model._table, 'id', 'cascade',
model, self._module, force=False,
)
if comodel._is_an_ordinary_table():
model.pool.add_foreign_key(
self.relation, self.column2, comodel._table, 'id', self.ondelete,
model, self._module,
)
@property
def groupable(self):
return self.store
def read(self, records):
context = {'active_test': False}
context.update(self.context)
comodel = records.env[self.comodel_name].with_context(**context)
domain = self.get_domain_list(records)
comodel._flush_search(domain)
wquery = comodel._where_calc(domain)
comodel._apply_ir_rules(wquery, 'read')
order_by = comodel._generate_order_by(None, wquery)
from_c, where_c, where_params = wquery.get_sql()
query = """ SELECT {rel}.{id1}, {rel}.{id2} FROM {rel}, {from_c}
WHERE {where_c} AND {rel}.{id1} IN %s AND {rel}.{id2} = {tbl}.id
{order_by} {limit} OFFSET {offset}
""".format(rel=self.relation, id1=self.column1, id2=self.column2,
tbl=comodel._table, from_c=from_c, where_c=where_c or '1=1',
limit=(' LIMIT %d' % self.limit) if self.limit else '',
offset=0, order_by=order_by)
where_params.append(tuple(records.ids))
# retrieve lines and group them by record
group = defaultdict(list)
records._cr.execute(query, where_params)
for row in records._cr.fetchall():
group[row[0]].append(row[1])
# store result in cache
cache = records.env.cache
for record in records:
cache.set(record, self, tuple(group[record.id]))
def write_real(self, records_commands_list, create=False):
# records_commands_list = [(records, commands), ...]
if not records_commands_list:
return
model = records_commands_list[0][0].browse()
comodel = model.env[self.comodel_name].with_context(**self.context)
cr = model.env.cr
# determine old and new relation {x: ys}
set = OrderedSet
ids = set(rid for recs, cs in records_commands_list for rid in recs.ids)
records = model.browse(ids)
if self.store:
# Using `record[self.name]` generates 2 SQL queries when the value
# is not in cache: one that actually checks access rules for
# records, and the other one fetching the actual data. We use
# `self.read` instead to shortcut the first query.
missing_ids = list(records.env.cache.get_missing_ids(records, self))
if missing_ids:
self.read(records.browse(missing_ids))
# determine new relation {x: ys}
old_relation = {record.id: set(record[self.name]._ids) for record in records}
new_relation = {x: set(ys) for x, ys in old_relation.items()}
# operations on new relation
def relation_add(xs, y):
for x in xs:
new_relation[x].add(y)
def relation_remove(xs, y):
for x in xs:
new_relation[x].discard(y)
def relation_set(xs, ys):
for x in xs:
new_relation[x] = set(ys)
def relation_delete(ys):
# the pairs (x, y) have been cascade-deleted from relation
for ys1 in old_relation.values():
ys1 -= ys
for ys1 in new_relation.values():
ys1 -= ys
for recs, commands in records_commands_list:
to_create = [] # line vals to create
to_delete = [] # line ids to delete
for command in (commands or ()):
if not isinstance(command, (list, tuple)) or not command:
continue
if command[0] == Command.CREATE:
to_create.append((recs._ids, command[2]))
elif command[0] == Command.UPDATE:
comodel.browse(command[1]).write(command[2])
elif command[0] == Command.DELETE:
to_delete.append(command[1])
elif command[0] == Command.UNLINK:
relation_remove(recs._ids, command[1])
elif command[0] == Command.LINK:
relation_add(recs._ids, command[1])
elif command[0] in (Command.CLEAR, Command.SET):
# new lines must no longer be linked to records
to_create = [(set(ids) - set(recs._ids), vals) for (ids, vals) in to_create]
relation_set(recs._ids, command[2] if command[0] == Command.SET else ())
if to_create:
# create lines in batch, and link them
lines = comodel.create([vals for ids, vals in to_create])
for line, (ids, vals) in zip(lines, to_create):
relation_add(ids, line.id)
if to_delete:
# delete lines in batch
comodel.browse(to_delete).unlink()
relation_delete(to_delete)
# update the cache of self
cache = records.env.cache
for record in records:
cache.set(record, self, tuple(new_relation[record.id]))
# process pairs to add (beware of duplicates)
pairs = [(x, y) for x, ys in new_relation.items() for y in ys - old_relation[x]]
if pairs:
if self.store:
query = "INSERT INTO {} ({}, {}) VALUES {} ON CONFLICT DO NOTHING".format(
self.relation, self.column1, self.column2, ", ".join(["%s"] * len(pairs)),
)
cr.execute(query, pairs)
# update the cache of inverse fields
y_to_xs = defaultdict(set)
for x, y in pairs:
y_to_xs[y].add(x)
for invf in records.pool.field_inverses[self]:
domain = invf.get_domain_list(comodel)
valid_ids = set(records.filtered_domain(domain)._ids)
if not valid_ids:
continue
for y, xs in y_to_xs.items():
corecord = comodel.browse(y)
try:
ids0 = cache.get(corecord, invf)
ids1 = tuple(set(ids0) | (xs & valid_ids))
cache.set(corecord, invf, ids1)
except KeyError:
pass
# process pairs to remove
pairs = [(x, y) for x, ys in old_relation.items() for y in ys - new_relation[x]]
if pairs:
y_to_xs = defaultdict(set)
for x, y in pairs:
y_to_xs[y].add(x)
if self.store:
# express pairs as the union of cartesian products:
# pairs = [(1, 11), (1, 12), (1, 13), (2, 11), (2, 12), (2, 14)]
# -> y_to_xs = {11: {1, 2}, 12: {1, 2}, 13: {1}, 14: {2}}
# -> xs_to_ys = {{1, 2}: {11, 12}, {2}: {14}, {1}: {13}}
xs_to_ys = defaultdict(set)
for y, xs in y_to_xs.items():
xs_to_ys[frozenset(xs)].add(y)
# delete the rows where (id1 IN xs AND id2 IN ys) OR ...
COND = "{} IN %s AND {} IN %s".format(self.column1, self.column2)
query = "DELETE FROM {} WHERE {}".format(
self.relation, " OR ".join([COND] * len(xs_to_ys)),
)
params = [arg for xs, ys in xs_to_ys.items() for arg in [tuple(xs), tuple(ys)]]
cr.execute(query, params)
# update the cache of inverse fields
for invf in records.pool.field_inverses[self]:
for y, xs in y_to_xs.items():
corecord = comodel.browse(y)
try:
ids0 = cache.get(corecord, invf)
ids1 = tuple(id_ for id_ in ids0 if id_ not in xs)
cache.set(corecord, invf, ids1)
except KeyError:
pass
return records.filtered(
lambda record: new_relation[record.id] != old_relation[record.id]
)
def write_new(self, records_commands_list):
""" Update self on new records. """
if not records_commands_list:
return
model = records_commands_list[0][0].browse()
comodel = model.env[self.comodel_name].with_context(**self.context)
new = lambda id_: id_ and NewId(id_)
# determine old and new relation {x: ys}
set = OrderedSet
old_relation = {record.id: set(record[self.name]._ids) for records, _ in records_commands_list for record in records}
new_relation = {x: set(ys) for x, ys in old_relation.items()}
ids = set(old_relation.keys())
records = model.browse(ids)
for recs, commands in records_commands_list:
for command in commands:
if not isinstance(command, (list, tuple)) or not command:
continue
if command[0] == Command.CREATE:
line_id = comodel.new(command[2], ref=command[1]).id
for line_ids in new_relation.values():
line_ids.add(line_id)
elif command[0] == Command.UPDATE:
line_id = new(command[1])
comodel.browse([line_id]).update(command[2])
elif command[0] == Command.DELETE:
line_id = new(command[1])
for line_ids in new_relation.values():
line_ids.discard(line_id)
elif command[0] == Command.UNLINK:
line_id = new(command[1])
for line_ids in new_relation.values():
line_ids.discard(line_id)
elif command[0] == Command.LINK:
line_id = new(command[1])
for line_ids in new_relation.values():
line_ids.add(line_id)
elif command[0] in (Command.CLEAR, Command.SET):
# new lines must no longer be linked to records
line_ids = command[2] if command[0] == Command.SET else ()
line_ids = set(new(line_id) for line_id in line_ids)
for id_ in recs._ids:
new_relation[id_] = set(line_ids)
if new_relation == old_relation:
return records.browse()
# update the cache of self
cache = records.env.cache
for record in records:
cache.set(record, self, tuple(new_relation[record.id]))
# process pairs to add (beware of duplicates)
pairs = [(x, y) for x, ys in new_relation.items() for y in ys - old_relation[x]]
if pairs:
# update the cache of inverse fields
y_to_xs = defaultdict(set)
for x, y in pairs:
y_to_xs[y].add(x)
for invf in records.pool.field_inverses[self]:
domain = invf.get_domain_list(comodel)
valid_ids = set(records.filtered_domain(domain)._ids)
if not valid_ids:
continue
for y, xs in y_to_xs.items():
corecord = comodel.browse([y])
try:
ids0 = cache.get(corecord, invf)
ids1 = tuple(set(ids0) | (xs & valid_ids))
cache.set(corecord, invf, ids1)
except KeyError:
pass
# process pairs to remove
pairs = [(x, y) for x, ys in old_relation.items() for y in ys - new_relation[x]]
if pairs:
# update the cache of inverse fields
y_to_xs = defaultdict(set)
for x, y in pairs:
y_to_xs[y].add(x)
for invf in records.pool.field_inverses[self]:
for y, xs in y_to_xs.items():
corecord = comodel.browse([y])
try:
ids0 = cache.get(corecord, invf)
ids1 = tuple(id_ for id_ in ids0 if id_ not in xs)
cache.set(corecord, invf, ids1)
except KeyError:
pass
return records.filtered(
lambda record: new_relation[record.id] != old_relation[record.id]
)
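# --- Illustrative sketch (not part of the original module) --------------------
# How the two many2many declaration styles handled above would look on a model:
# an implicit schema (relation table and columns derived from the table names in
# setup_nonrelated) and an explicit one (required e.g. when several m2m fields
# link the same pair of models). The model, table and field names below are
# hypothetical.
def _example_many2many_declarations():
    from odoo import fields, models

    class LibraryBook(models.Model):
        _name = 'x_library.book'
        _description = 'Example book'

        # implicit schema: relation table/columns are derived automatically
        tag_ids = fields.Many2many('res.partner.category', string='Tags')

        # explicit schema: relation table and columns are given by the developer
        author_ids = fields.Many2many(
            comodel_name='res.partner',
            relation='x_library_book_author_rel',  # name of the relation table
            column1='book_id',                     # column referring to this model
            column2='partner_id',                  # column referring to the comodel
            string='Authors',
        )

    return LibraryBook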
class Id(Field):
""" Special case for field 'id'. """
type = 'integer'
column_type = ('int4', 'int4')
string = 'ID'
store = True
readonly = True
prefetch = False
def update_db(self, model, columns):
pass # this column is created with the table
def __get__(self, record, owner):
if record is None:
return self # the field is accessed through the class owner
# the code below is written to make record.id as quick as possible
ids = record._ids
size = len(ids)
if size == 0:
return False
elif size == 1:
return ids[0]
raise ValueError("Expected singleton: %s" % record)
def __set__(self, record, value):
raise TypeError("field 'id' cannot be assigned")
def prefetch_many2one_ids(record, field):
""" Return an iterator over the ids of the cached values of a many2one
field for the prefetch set of a record.
"""
records = record.browse(record._prefetch_ids)
ids = record.env.cache.get_values(records, field)
return unique(id_ for id_ in ids if id_ is not None)
def prefetch_x2many_ids(record, field):
""" Return an iterator over the ids of the cached values of an x2many
field for the prefetch set of a record.
"""
records = record.browse(record._prefetch_ids)
ids_list = record.env.cache.get_values(records, field)
return unique(id_ for ids in ids_list for id_ in ids)
def apply_required(model, field_name):
""" Set a NOT NULL constraint on the given field, if necessary. """
# At the time this function is called, the model's _fields may have been reset, although
# the model's class is still the same. Retrieve the field to see whether the NOT NULL
# constraint still applies
field = model._fields[field_name]
if field.store and field.required:
sql.set_not_null(model.env.cr, model._table, field_name)
# imported here to avoid dependency cycle issues
# pylint: disable=wrong-import-position
from .exceptions import AccessError, MissingError, UserError
from .models import (
check_pg_name, expand_ids, is_definition_class, is_registry_class,
BaseModel, IdType, NewId, PREFETCH_MAX,
)
| 43.131016 | 177,441 |
3,663 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import sys
LOG_NOTSET = 'notset'
LOG_DEBUG = 'debug'
LOG_INFO = 'info'
LOG_WARNING = 'warn'
LOG_ERROR = 'error'
LOG_CRITICAL = 'critical'
# TODO get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# They are here until we refactor tools so that this module doesn't depend on tools.
def get_encodings(hint_encoding='utf-8'):
fallbacks = {
'latin1': 'latin9',
'iso-8859-1': 'iso8859-15',
'iso-8859-8-i': 'iso8859-8',
'cp1252': '1252',
}
if hint_encoding:
yield hint_encoding
if hint_encoding.lower() in fallbacks:
yield fallbacks[hint_encoding.lower()]
# some defaults (also taking care of pure ASCII)
for charset in ['utf8','latin1']:
if not hint_encoding or (charset.lower() != hint_encoding.lower()):
yield charset
from locale import getpreferredencoding
prefenc = getpreferredencoding()
if prefenc and prefenc.lower() != 'utf-8':
yield prefenc
prefenc = fallbacks.get(prefenc.lower())
if prefenc:
yield prefenc
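# Illustrative sketch (not part of the original module): the order in which the
# generator above yields candidate encodings for a 'cp1252' hint. The exact tail
# of the list depends on the locale's preferred encoding.
def _example_get_encodings_order():
    candidates = list(get_encodings('cp1252'))
    # e.g. ['cp1252', '1252', 'utf8', 'latin1', ...]
    return candidates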
# not using pycompat to avoid a circular import: pycompat is in tools, much of
# which comes back to import loglevels
text_type = type(u'')
def ustr(value, hint_encoding='utf-8', errors='strict'):
"""This method is similar to the builtin `unicode`, except
that it may try multiple encodings to find one that works
for decoding `value`, and defaults to 'utf-8' first.
:param value: the value to convert
:param hint_encoding: an optional encoding that was detected
upstream and should be tried first to decode ``value``.
:param str errors: optional `errors` flag to pass to the unicode
built-in to indicate how illegal character values should be
treated when converting a string: 'strict', 'ignore' or 'replace'
(see ``unicode()`` constructor).
Passing anything other than 'strict' means that the first
encoding tried will be used, even if it's not the correct
one to use, so be careful! Ignored if value is not a string/unicode.
:raise: UnicodeError if value cannot be coerced to unicode
:return: unicode string representing the given value
"""
# We use direct type comparison instead of `isinstance`
# as much as possible, in order to make the most common
# cases faster (isinstance/issubclass are significantly slower)
ttype = type(value)
if ttype is text_type:
return value
# special short-circuit for str, as we still need to support
# str subclasses such as `odoo.tools.unquote`
if ttype is bytes or issubclass(ttype, bytes):
# try hint_encoding first, avoids call to get_encoding()
# for the most common case
try:
return value.decode(hint_encoding, errors=errors)
except Exception:
pass
# rare: no luck with hint_encoding, attempt other ones
for ln in get_encodings(hint_encoding):
try:
return value.decode(ln, errors=errors)
except Exception:
pass
if isinstance(value, Exception):
return exception_to_unicode(value)
# fallback for non-string values
try:
return text_type(value)
except Exception:
raise UnicodeError('unable to convert %r' % (value,))
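# Illustrative sketch (not part of the original module): typical ustr() calls.
# The sample values below are arbitrary.
def _example_ustr_usage():
    assert ustr('déjà') == 'déjà'                                     # text passes through
    assert ustr('déjà'.encode('latin-1'), hint_encoding='latin-1') == 'déjà'
    assert ustr(b'\xc3\xa9') == 'é'                                   # utf-8 tried first
    assert isinstance(ustr(ValueError('boom')), str)                  # exceptions become text
    return True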
def exception_to_unicode(e):
if getattr(e, 'args', ()):
return "\n".join((ustr(a) for a in e.args))
try:
return text_type(e)
except Exception:
return u"Unknown message"
| 35.221154 | 3,663 |
38,952 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""The Odoo API module defines Odoo Environments and method decorators.
.. todo:: Document this module
"""
__all__ = [
'Environment',
'Meta',
'model',
'constrains', 'depends', 'onchange', 'returns',
'call_kw',
]
import logging
import warnings
from collections import defaultdict
from collections.abc import Mapping
from contextlib import contextmanager
from inspect import signature
from pprint import pformat
from weakref import WeakSet
from decorator import decorate
from .exceptions import CacheMiss
from .tools import frozendict, classproperty, lazy_property, StackMap
from .tools.translate import _
_logger = logging.getLogger(__name__)
# The following attributes are used, and reflected on wrapping methods:
# - method._constrains: set by @constrains, specifies constraint dependencies
# - method._depends: set by @depends, specifies compute dependencies
# - method._returns: set by @returns, specifies return model
# - method._onchange: set by @onchange, specifies onchange fields
# - method.clear_cache: set by @ormcache, used to clear the cache
# - method._ondelete: set by @ondelete, used to raise errors for unlink operations
#
# On wrapping method only:
# - method._api: decorator function, used for re-applying decorator
#
INHERITED_ATTRS = ('_returns',)
class Params(object):
def __init__(self, args, kwargs):
self.args = args
self.kwargs = kwargs
def __str__(self):
params = []
for arg in self.args:
params.append(repr(arg))
for item in sorted(self.kwargs.items()):
params.append("%s=%r" % item)
return ', '.join(params)
class Meta(type):
""" Metaclass that automatically decorates traditional-style methods by
guessing their API. It also implements the inheritance of the
:func:`returns` decorators.
"""
def __new__(meta, name, bases, attrs):
# dummy parent class to catch overridden methods decorated with 'returns'
parent = type.__new__(meta, name, bases, {})
for key, value in list(attrs.items()):
if not key.startswith('__') and callable(value):
# make the method inherit from decorators
value = propagate(getattr(parent, key, None), value)
if (getattr(value, '_api', None) or '').startswith('cr'):
_logger.warning("Deprecated method %s.%s in module %s", name, key, attrs.get('__module__'))
attrs[key] = value
return type.__new__(meta, name, bases, attrs)
def attrsetter(attr, value):
""" Return a function that sets ``attr`` on its argument and returns it. """
return lambda method: setattr(method, attr, value) or method
def propagate(method1, method2):
""" Propagate decorators from ``method1`` to ``method2``, and return the
resulting method.
"""
if method1:
for attr in INHERITED_ATTRS:
if hasattr(method1, attr) and not hasattr(method2, attr):
setattr(method2, attr, getattr(method1, attr))
return method2
def constrains(*args):
"""Decorate a constraint checker.
Each argument must be a field name used in the check::
@api.constrains('name', 'description')
def _check_description(self):
for record in self:
if record.name == record.description:
raise ValidationError("Fields name and description must be different")
Invoked on the records on which one of the named fields has been modified.
Should raise :exc:`~odoo.exceptions.ValidationError` if the
validation failed.
.. warning::
``@constrains`` only supports simple field names, dotted names
(fields of relational fields e.g. ``partner_id.customer``) are not
supported and will be ignored.
``@constrains`` will be triggered only if the declared fields in the
decorated method are included in the ``create`` or ``write`` call.
It implies that fields not present in a view will not trigger a call
during a record creation. An override of ``create`` is necessary to make
sure a constraint will always be triggered (e.g. to test the absence of
value).
One may also pass a single function as argument. In that case, the field
names are given by calling the function with a model instance.
"""
if args and callable(args[0]):
args = args[0]
return attrsetter('_constrains', args)
def ondelete(*, at_uninstall):
"""
Mark a method to be executed during :meth:`~odoo.models.BaseModel.unlink`.
The goal of this decorator is to allow client-side errors when unlinking
records if, from a business point of view, it does not make sense to delete
such records. For instance, a user should not be able to delete a validated
sales order.
While this could be implemented by simply overriding the method ``unlink``
on the model, it has the drawback of not being compatible with module
uninstallation. When uninstalling the module, the override could raise user
errors, but we shouldn't care because the module is being uninstalled, and
thus **all** records related to the module should be removed anyway.
This means that by overriding ``unlink``, there is a big chance that some
tables/records may remain as leftover data from the uninstalled module. This
leaves the database in an inconsistent state. Moreover, there is a risk of
conflicts if the module is ever reinstalled on that database.
Methods decorated with ``@ondelete`` should raise an error following some
conditions, and by convention, the method should be named either
``_unlink_if_<condition>`` or ``_unlink_except_<not_condition>``.
.. code-block:: python
@api.ondelete(at_uninstall=False)
def _unlink_if_user_inactive(self):
if any(user.active for user in self):
raise UserError("Can't delete an active user!")
# same as above but with _unlink_except_* as method name
@api.ondelete(at_uninstall=False)
def _unlink_except_active_user(self):
if any(user.active for user in self):
raise UserError("Can't delete an active user!")
:param bool at_uninstall: Whether the decorated method should be called if
the module that implements said method is being uninstalled. Should
almost always be ``False``, so that module uninstallation does not
trigger those errors.
.. danger::
The parameter ``at_uninstall`` should only be set to ``True`` if the
check you are implementing also applies when uninstalling the module.
For instance, it doesn't matter if when uninstalling ``sale``, validated
sales orders are being deleted because all data pertaining to ``sale``
should be deleted anyway, in that case ``at_uninstall`` should be set to
``False``.
However, it makes sense to prevent the removal of the default language
if no other languages are installed, since deleting the default language
will break a lot of basic behavior. In this case, ``at_uninstall``
should be set to ``True``.
"""
return attrsetter('_ondelete', at_uninstall)
def onchange(*args):
"""Return a decorator to decorate an onchange method for given fields.
In the form views where the field appears, the method will be called
when one of the given fields is modified. The method is invoked on a
pseudo-record that contains the values present in the form. Field
assignments on that record are automatically sent back to the client.
Each argument must be a field name::
@api.onchange('partner_id')
def _onchange_partner(self):
self.message = "Dear %s" % (self.partner_id.name or "")
.. code-block:: python
return {
'warning': {'title': "Warning", 'message': "What is this?", 'type': 'notification'},
}
If the type is set to notification, the warning will be displayed in a notification.
Otherwise it will be displayed in a dialog as default.
.. warning::
``@onchange`` only supports simple field names, dotted names
(fields of relational fields e.g. ``partner_id.tz``) are not
supported and will be ignored
.. danger::
Since ``@onchange`` returns a recordset of pseudo-records,
calling any one of the CRUD methods
(:meth:`create`, :meth:`read`, :meth:`write`, :meth:`unlink`)
on the aforementioned recordset is undefined behaviour,
as they potentially do not exist in the database yet.
Instead, simply set the record's field like shown in the example
above or call the :meth:`update` method.
.. warning::
It is not possible for a ``one2many`` or ``many2many`` field to modify
itself via onchange. This is a webclient limitation - see `#2693 <https://github.com/odoo/odoo/issues/2693>`_.
"""
return attrsetter('_onchange', args)
def depends(*args):
""" Return a decorator that specifies the field dependencies of a "compute"
method (for new-style function fields). Each argument must be a string
that consists in a dot-separated sequence of field names::
pname = fields.Char(compute='_compute_pname')
@api.depends('partner_id.name', 'partner_id.is_company')
def _compute_pname(self):
for record in self:
if record.partner_id.is_company:
record.pname = (record.partner_id.name or "").upper()
else:
record.pname = record.partner_id.name
One may also pass a single function as argument. In that case, the
dependencies are given by calling the function with the field's model.
"""
if args and callable(args[0]):
args = args[0]
elif any('id' in arg.split('.') for arg in args):
raise NotImplementedError("Compute method cannot depend on field 'id'.")
return attrsetter('_depends', args)
def depends_context(*args):
""" Return a decorator that specifies the context dependencies of a
non-stored "compute" method. Each argument is a key in the context's
dictionary::
price = fields.Float(compute='_compute_product_price')
@api.depends_context('pricelist')
def _compute_product_price(self):
for product in self:
if product.env.context.get('pricelist'):
pricelist = self.env['product.pricelist'].browse(product.env.context['pricelist'])
else:
pricelist = self.env['product.pricelist'].get_default_pricelist()
product.price = pricelist.get_products_price(product).get(product.id, 0.0)
All dependencies must be hashable. The following keys have special
support:
* `company` (value in context or current company id),
* `uid` (current user id and superuser flag),
* `active_test` (value in env.context or value in field.context).
"""
return attrsetter('_depends_context', args)
def returns(model, downgrade=None, upgrade=None):
""" Return a decorator for methods that return instances of ``model``.
:param model: a model name, or ``'self'`` for the current model
:param downgrade: a function ``downgrade(self, value, *args, **kwargs)``
to convert the record-style ``value`` to a traditional-style output
:param upgrade: a function ``upgrade(self, value, *args, **kwargs)``
to convert the traditional-style ``value`` to a record-style output
The arguments ``self``, ``*args`` and ``**kwargs`` are the ones passed
to the method in the record-style.
The decorator adapts the method output to the api style: ``id``, ``ids`` or
``False`` for the traditional style, and recordset for the record style::
@model
@returns('res.partner')
def find_partner(self, arg):
... # return some record
# output depends on call style: traditional vs record style
partner_id = model.find_partner(cr, uid, arg, context=context)
# recs = model.browse(cr, uid, ids, context)
partner_record = recs.find_partner(arg)
Note that the decorated method must satisfy that convention.
Those decorators are automatically *inherited*: a method that overrides
a decorated existing method will be decorated with the same
``@returns(model)``.
"""
return attrsetter('_returns', (model, downgrade, upgrade))
def downgrade(method, value, self, args, kwargs):
""" Convert ``value`` returned by ``method`` on ``self`` to traditional style. """
spec = getattr(method, '_returns', None)
if not spec:
return value
_, convert, _ = spec
if convert and len(signature(convert).parameters) > 1:
return convert(self, value, *args, **kwargs)
elif convert:
return convert(value)
else:
return value.ids
def split_context(method, args, kwargs):
""" Extract the context from a pair of positional and keyword arguments.
Return a triple ``context, args, kwargs``.
"""
# altering kwargs is a cause of errors, for instance when retrying a request
# after a serialization error: the retry is done without context!
kwargs = kwargs.copy()
return kwargs.pop('context', None), args, kwargs
def autovacuum(method):
"""
Decorate a method so that it is called by the daily vacuum cron job (model
``ir.autovacuum``). This is typically used for garbage-collection-like
tasks that do not deserve a specific cron job.
"""
assert method.__name__.startswith('_'), "%s: autovacuum methods must be private" % method.__name__
method._autovacuum = True
return method
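# Illustrative sketch (not part of the original module): a hypothetical
# garbage-collection method picked up by the daily ``ir.autovacuum`` cron.
# The model and field names are made up.
def _example_autovacuum_usage():
    from odoo import api, fields, models

    class ExampleToken(models.Model):
        _name = 'x_example.token'
        _description = 'Example expiring token'

        expiration_date = fields.Datetime()

        @api.autovacuum
        def _gc_expired_tokens(self):
            # the leading underscore is mandatory, see the assertion above
            self.search([('expiration_date', '<', fields.Datetime.now())]).unlink()

    return ExampleToken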
def model(method):
""" Decorate a record-style method where ``self`` is a recordset, but its
contents are not relevant, only the model is. Such a method::
@api.model
def method(self, args):
...
"""
if method.__name__ == 'create':
return model_create_single(method)
method._api = 'model'
return method
_create_logger = logging.getLogger(__name__ + '.create')
def _model_create_single(create, self, arg):
# 'create' expects a dict and returns a record
if isinstance(arg, Mapping):
return create(self, arg)
if len(arg) > 1:
_create_logger.debug("%s.create() called with %d dicts", self, len(arg))
return self.browse().concat(*(create(self, vals) for vals in arg))
def model_create_single(method):
""" Decorate a method that takes a dictionary and creates a single record.
The method may be called with either a single dict or a list of dicts::
record = model.create(vals)
records = model.create([vals, ...])
"""
wrapper = decorate(method, _model_create_single)
wrapper._api = 'model_create'
return wrapper
def _model_create_multi(create, self, arg):
# 'create' expects a list of dicts and returns a recordset
if isinstance(arg, Mapping):
return create(self, [arg])
return create(self, arg)
def model_create_multi(method):
""" Decorate a method that takes a list of dictionaries and creates multiple
records. The method may be called with either a single dict or a list of
dicts::
record = model.create(vals)
records = model.create([vals, ...])
"""
wrapper = decorate(method, _model_create_multi)
wrapper._api = 'model_create'
return wrapper
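# Illustrative sketch (not part of the original module): overriding create() in
# batch mode with the decorator above. The model name and the 'state' default
# are hypothetical.
def _example_model_create_multi_usage():
    from odoo import api, fields, models

    class ExampleOrder(models.Model):
        _name = 'x_example.order'
        _description = 'Example order'

        state = fields.Char()

        @api.model_create_multi
        def create(self, vals_list):
            # vals_list is always a list of dicts here, even if the caller
            # passed a single dict (see _model_create_multi above)
            for vals in vals_list:
                vals.setdefault('state', 'draft')
            return super().create(vals_list)

    return ExampleOrder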
def _call_kw_model(method, self, args, kwargs):
context, args, kwargs = split_context(method, args, kwargs)
recs = self.with_context(context or {})
_logger.debug("call %s.%s(%s)", recs, method.__name__, Params(args, kwargs))
result = method(recs, *args, **kwargs)
return downgrade(method, result, recs, args, kwargs)
def _call_kw_model_create(method, self, args, kwargs):
# special case for method 'create'
context, args, kwargs = split_context(method, args, kwargs)
recs = self.with_context(context or {})
_logger.debug("call %s.%s(%s)", recs, method.__name__, Params(args, kwargs))
result = method(recs, *args, **kwargs)
return result.id if isinstance(args[0], Mapping) else result.ids
def _call_kw_multi(method, self, args, kwargs):
ids, args = args[0], args[1:]
context, args, kwargs = split_context(method, args, kwargs)
recs = self.with_context(context or {}).browse(ids)
_logger.debug("call %s.%s(%s)", recs, method.__name__, Params(args, kwargs))
result = method(recs, *args, **kwargs)
return downgrade(method, result, recs, args, kwargs)
def call_kw(model, name, args, kwargs):
""" Invoke the given method ``name`` on the recordset ``model``. """
method = getattr(type(model), name)
api = getattr(method, '_api', None)
if api == 'model':
result = _call_kw_model(method, model, args, kwargs)
elif api == 'model_create':
result = _call_kw_model_create(method, model, args, kwargs)
else:
result = _call_kw_multi(method, model, args, kwargs)
model.flush()
return result
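# Illustrative sketch (not part of the original module): how an RPC layer would
# typically invoke call_kw(). The ids and field names are arbitrary.
def _example_call_kw_usage(env):
    # @api.model method: no ids in the positional arguments
    matches = call_kw(env['res.partner'], 'name_search', ['Azure'], {'limit': 5})
    # record-style method: the first positional argument is the list of ids
    data = call_kw(env['res.partner'], 'read', [[1, 2], ['name', 'email']], {})
    return matches, data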
class Environment(Mapping):
""" An environment wraps data for ORM records:
- :attr:`cr`, the current database cursor;
- :attr:`uid`, the current user id;
- :attr:`context`, the current context dictionary;
- :attr:`su`, whether in superuser mode.
It provides access to the registry by implementing a mapping from model
names to new api models. It also holds a cache for records, and a data
structure to manage recomputations.
"""
@classproperty
def envs(cls):
raise NotImplementedError(
"Since Odoo 15.0, Environment.envs no longer works; "
"use cr.transaction or env.transaction instead."
)
@classmethod
@contextmanager
def manage(cls):
warnings.warn(
"Since Odoo 15.0, Environment.manage() is useless.",
DeprecationWarning, stacklevel=2,
)
yield
def reset(self):
""" Reset the transaction, see :meth:`Transaction.reset`. """
self.transaction.reset()
def __new__(cls, cr, uid, context, su=False):
if uid == SUPERUSER_ID:
su = True
assert context is not None
args = (cr, uid, context, su)
# determine transaction object
transaction = cr.transaction
if transaction is None:
transaction = cr.transaction = Transaction(Registry(cr.dbname))
# if env already exists, return it
for env in transaction.envs:
if env.args == args:
return env
# otherwise create environment, and add it in the set
self = object.__new__(cls)
args = (cr, uid, frozendict(context), su)
self.cr, self.uid, self.context, self.su = self.args = args
self.transaction = self.all = transaction
self.registry = transaction.registry
self.cache = transaction.cache
self._cache_key = {} # memo {field: cache_key}
self._protected = transaction.protected
transaction.envs.add(self)
return self
#
# Mapping methods
#
def __contains__(self, model_name):
""" Test whether the given model exists. """
return model_name in self.registry
def __getitem__(self, model_name):
""" Return an empty recordset from the given model. """
return self.registry[model_name]._browse(self, (), ())
def __iter__(self):
""" Return an iterator on model names. """
return iter(self.registry)
def __len__(self):
""" Return the size of the model registry. """
return len(self.registry)
def __eq__(self, other):
return self is other
def __ne__(self, other):
return self is not other
def __hash__(self):
return object.__hash__(self)
def __call__(self, cr=None, user=None, context=None, su=None):
""" Return an environment based on ``self`` with modified parameters.
:param cr: optional database cursor to change the current cursor
:param user: optional user/user id to change the current user
:param context: optional context dictionary to change the current context
:param su: optional boolean to change the superuser mode
:type context: dict
:type user: int or :class:`~odoo.addons.base.models.res_users`
:type su: bool
"""
cr = self.cr if cr is None else cr
uid = self.uid if user is None else int(user)
context = self.context if context is None else context
su = (user is None and self.su) if su is None else su
return Environment(cr, uid, context, su)
def ref(self, xml_id, raise_if_not_found=True):
"""Return the record corresponding to the given ``xml_id``."""
res_model, res_id = self['ir.model.data']._xmlid_to_res_model_res_id(
xml_id, raise_if_not_found=raise_if_not_found
)
if res_model and res_id:
record = self[res_model].browse(res_id)
if record.exists():
return record
if raise_if_not_found:
raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xml_id))
return None
def is_superuser(self):
""" Return whether the environment is in superuser mode. """
return self.su
def is_admin(self):
""" Return whether the current user has group "Access Rights", or is in
superuser mode. """
return self.su or self.user._is_admin()
def is_system(self):
""" Return whether the current user has group "Settings", or is in
superuser mode. """
return self.su or self.user._is_system()
@lazy_property
def user(self):
"""Return the current user (as an instance).
:returns: current user - sudoed
:rtype: :class:`~odoo.addons.base.models.res_users`"""
return self(su=True)['res.users'].browse(self.uid)
@lazy_property
def company(self):
"""Return the current company (as an instance).
If not specified in the context (`allowed_company_ids`),
fall back on the current user's main company.
:raise AccessError: invalid or unauthorized `allowed_company_ids` context key content.
:return: current company (default=`self.user.company_id`), with the current environment
:rtype: res.company
.. warning::
No sanity checks applied in sudo mode !
When in sudo mode, a user can access any company,
even if not in his allowed companies.
This allows triggering inter-company modifications,
even if the current user doesn't have access to
the targeted company.
"""
company_ids = self.context.get('allowed_company_ids', [])
if company_ids:
if not self.su:
user_company_ids = self.user.company_ids.ids
if any(cid not in user_company_ids for cid in company_ids):
raise AccessError(_("Access to unauthorized or invalid companies."))
return self['res.company'].browse(company_ids[0])
return self.user.company_id.with_env(self)
@lazy_property
def companies(self):
"""Return a recordset of the enabled companies by the user.
If not specified in the context(`allowed_company_ids`),
fallback on current user companies.
:raise AccessError: invalid or unauthorized `allowed_company_ids` context key content.
:return: current companies (default=`self.user.company_ids`), with the current environment
:rtype: res.company
.. warning::
No sanity checks applied in sudo mode !
When in sudo mode, a user can access any company,
even if not in his allowed companies.
This allows triggering inter-company modifications,
even if the current user doesn't have access to
the targeted company.
"""
company_ids = self.context.get('allowed_company_ids', [])
if company_ids:
if not self.su:
user_company_ids = self.user.company_ids.ids
if any(cid not in user_company_ids for cid in company_ids):
raise AccessError(_("Access to unauthorized or invalid companies."))
return self['res.company'].browse(company_ids)
# By setting the default companies to all user companies instead of the main one
# we save a lot of potential trouble in all "out of context" calls, such as
# /mail/redirect or /web/image, etc. And it is not unsafe because the user does
# have access to these other companies. The risk of exposing foreign records
# (wrt to the context) is low because all normal RPCs will have a proper
# allowed_company_ids.
# Examples:
# - when printing a report for several records from several companies
# - when accessing to a record from the notification email template
# - when loading a binary image on a template
return self.user.company_ids.with_env(self)
@property
def lang(self):
"""Return the current language code.
:rtype: str
"""
return self.context.get('lang')
def clear(self):
""" Clear all record caches, and discard all fields to recompute.
This may be useful when recovering from a failed ORM operation.
"""
lazy_property.reset_all(self)
self.transaction.clear()
def clear_upon_failure(self):
""" Context manager that rolls back the environments (caches and pending
computations and updates) upon exception.
"""
warnings.warn(
"Since Odoo 15.0, use cr.savepoint() instead of env.clear_upon_failure().",
DeprecationWarning, stacklevel=2,
)
return self.cr.savepoint()
def is_protected(self, field, record):
""" Return whether `record` is protected against invalidation or
recomputation for `field`.
"""
return record.id in self._protected.get(field, ())
def protected(self, field):
""" Return the recordset for which ``field`` should not be invalidated or recomputed. """
return self[field.model_name].browse(self._protected.get(field, ()))
@contextmanager
def protecting(self, what, records=None):
""" Prevent the invalidation or recomputation of fields on records.
The parameters are either:
- ``what`` a collection of fields and ``records`` a recordset, or
- ``what`` a collection of pairs ``(fields, records)``.
"""
protected = self._protected
try:
protected.pushmap()
what = what if records is None else [(what, records)]
for fields, records in what:
for field in fields:
ids = protected.get(field, frozenset())
protected[field] = ids.union(records._ids)
yield
finally:
protected.popmap()
def fields_to_compute(self):
""" Return a view on the field to compute. """
return self.all.tocompute.keys()
def records_to_compute(self, field):
""" Return the records to compute for ``field``. """
ids = self.all.tocompute.get(field, ())
return self[field.model_name].browse(ids)
def is_to_compute(self, field, record):
""" Return whether ``field`` must be computed on ``record``. """
return record.id in self.all.tocompute.get(field, ())
def not_to_compute(self, field, records):
""" Return the subset of ``records`` for which ``field`` must not be computed. """
ids = self.all.tocompute.get(field, ())
return records.browse(id_ for id_ in records._ids if id_ not in ids)
def add_to_compute(self, field, records):
""" Mark ``field`` to be computed on ``records``. """
if not records:
return records
self.all.tocompute[field].update(records._ids)
def remove_to_compute(self, field, records):
""" Mark ``field`` as computed on ``records``. """
if not records:
return
ids = self.all.tocompute.get(field, None)
if ids is None:
return
ids.difference_update(records._ids)
if not ids:
del self.all.tocompute[field]
@contextmanager
def norecompute(self):
""" Delay recomputations (deprecated: this is not the default behavior). """
yield
def cache_key(self, field):
""" Return the cache key of the given ``field``. """
try:
return self._cache_key[field]
except KeyError:
def get(key, get_context=self.context.get):
if key == 'company':
return self.company.id
elif key == 'uid':
return (self.uid, self.su)
elif key == 'active_test':
return get_context('active_test', field.context.get('active_test', True))
else:
val = get_context(key)
if type(val) is list:
val = tuple(val)
try:
hash(val)
except TypeError:
raise TypeError(
"Can only create cache keys from hashable values, "
"got non-hashable value {!r} at context key {!r} "
"(dependency of field {})".format(val, key, field)
) from None # we don't need to chain the exception created 2 lines above
else:
return val
result = tuple(get(key) for key in self.registry.field_depends_context[field])
self._cache_key[field] = result
return result
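# Illustrative sketch (not part of the original module): common ways of deriving
# environments and looking up records; only 'base.main_company' is a standard
# xml id, the rest is arbitrary.
def _example_environment_usage(env):
    partners = env['res.partner']                          # empty recordset of a model
    main_company = env.ref('base.main_company')            # record behind an xml id
    as_superuser = env(user=SUPERUSER_ID)                  # same cursor, other user
    with_archived = env(context=dict(env.context, active_test=False))
    return partners, main_company, as_superuser, with_archived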
class Transaction:
""" A object holding ORM data structures for a transaction. """
def __init__(self, registry):
self.registry = registry
# weak set of environments
self.envs = WeakSet()
# cache for all records
self.cache = Cache()
# fields to protect {field: ids}
self.protected = StackMap()
# pending computations {field: ids}
self.tocompute = defaultdict(set)
# pending updates {model: {id: {field: value}}}
self.towrite = defaultdict(lambda: defaultdict(dict))
def flush(self):
""" Flush pending computations and updates in the transaction. """
env_to_flush = None
for env in self.envs:
if isinstance(env.uid, int) or env.uid is None:
env_to_flush = env
if env.uid is not None:
break
if env_to_flush is not None:
env_to_flush['base'].flush()
def clear(self):
""" Clear the caches and pending computations and updates in the translations. """
self.cache.invalidate()
self.tocompute.clear()
self.towrite.clear()
def reset(self):
""" Reset the transaction. This clears the transaction, and reassigns
the registry on all its environments. This operation is strongly
recommended after reloading the registry.
"""
self.registry = Registry(self.registry.db_name)
for env in self.envs:
env.registry = self.registry
lazy_property.reset_all(env)
self.clear()
# sentinel value for optional parameters
NOTHING = object()
EMPTY_DICT = frozendict()
class Cache(object):
""" Implementation of the cache of records. """
def __init__(self):
# {field: {record_id: value}, field: {context_key: {record_id: value}}}
self._data = defaultdict(dict)
def _get_field_cache(self, model, field):
""" Return the field cache of the given field, but not for modifying it. """
field_cache = self._data.get(field, EMPTY_DICT)
if field_cache and model.pool.field_depends_context[field]:
field_cache = field_cache.get(model.env.cache_key(field), EMPTY_DICT)
return field_cache
def _set_field_cache(self, model, field):
""" Return the field cache of the given field for modifying it. """
field_cache = self._data[field]
if model.pool.field_depends_context[field]:
field_cache = field_cache.setdefault(model.env.cache_key(field), {})
return field_cache
def contains(self, record, field):
""" Return whether ``record`` has a value for ``field``. """
return record.id in self._get_field_cache(record, field)
def get(self, record, field, default=NOTHING):
""" Return the value of ``field`` for ``record``. """
try:
field_cache = self._get_field_cache(record, field)
return field_cache[record._ids[0]]
except KeyError:
if default is NOTHING:
raise CacheMiss(record, field)
return default
def set(self, record, field, value):
""" Set the value of ``field`` for ``record``. """
field_cache = self._set_field_cache(record, field)
field_cache[record._ids[0]] = value
def update(self, records, field, values):
""" Set the values of ``field`` for several ``records``. """
field_cache = self._set_field_cache(records, field)
field_cache.update(zip(records._ids, values))
def remove(self, record, field):
""" Remove the value of ``field`` for ``record``. """
try:
field_cache = self._set_field_cache(record, field)
del field_cache[record._ids[0]]
except KeyError:
pass
def get_values(self, records, field):
""" Return the cached values of ``field`` for ``records``. """
field_cache = self._get_field_cache(records, field)
for record_id in records._ids:
try:
yield field_cache[record_id]
except KeyError:
pass
def get_until_miss(self, records, field):
""" Return the cached values of ``field`` for ``records`` until a value is not found. """
field_cache = self._get_field_cache(records, field)
vals = []
for record_id in records._ids:
try:
vals.append(field_cache[record_id])
except KeyError:
break
return vals
def get_records_different_from(self, records, field, value):
""" Return the subset of ``records`` that has not ``value`` for ``field``. """
field_cache = self._get_field_cache(records, field)
ids = []
for record_id in records._ids:
try:
val = field_cache[record_id]
except KeyError:
ids.append(record_id)
else:
if val != value:
ids.append(record_id)
return records.browse(ids)
def get_fields(self, record):
""" Return the fields with a value for ``record``. """
for name, field in record._fields.items():
if name != 'id' and record.id in self._get_field_cache(record, field):
yield field
def get_records(self, model, field):
""" Return the records of ``model`` that have a value for ``field``. """
field_cache = self._get_field_cache(model, field)
return model.browse(field_cache)
def get_missing_ids(self, records, field):
""" Return the ids of ``records`` that have no value for ``field``. """
field_cache = self._get_field_cache(records, field)
for record_id in records._ids:
if record_id not in field_cache:
yield record_id
def invalidate(self, spec=None):
""" Invalidate the cache, partially or totally depending on ``spec``. """
if spec is None:
self._data.clear()
elif spec:
for field, ids in spec:
if ids is None:
self._data.pop(field, None)
continue
cache = self._data.get(field)
if not cache:
continue
caches = cache.values() if isinstance(next(iter(cache)), tuple) else [cache]
for field_cache in caches:
for id_ in ids:
field_cache.pop(id_, None)
def check(self, env):
""" Check the consistency of the cache for the given environment. """
# flush fields to be recomputed before evaluating the cache
env['res.partner'].recompute()
# make a copy of the cache, and invalidate it
dump = dict(self._data)
self.invalidate()
depends_context = env.registry.field_depends_context
# re-fetch the records, and compare with their former cache
invalids = []
def check(model, field, field_dump):
records = env[field.model_name].browse(field_dump)
for record in records:
if not record.id:
continue
try:
cached = field_dump[record.id]
value = field.convert_to_record(cached, record)
fetched = record[field.name]
if fetched != value:
info = {'cached': value, 'fetched': fetched}
invalids.append((record, field, info))
except (AccessError, MissingError):
pass
for field, field_dump in dump.items():
model = env[field.model_name]
if depends_context[field]:
for context_keys, field_cache in field_dump.items():
context = dict(zip(depends_context[field], context_keys))
check(model.with_context(context), field, field_cache)
else:
check(model, field, field_dump)
if invalids:
raise UserError('Invalid cache for fields\n' + pformat(invalids))
# keep those imports here in order to handle cyclic dependencies correctly
from odoo import SUPERUSER_ID
from odoo.exceptions import UserError, AccessError, MissingError
from odoo.modules.registry import Registry
| 38.039063 | 38,952 |
68,758 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
#----------------------------------------------------------
# OpenERP HTTP layer
#----------------------------------------------------------
import ast
import cgi
import collections
import contextlib
import functools
import hashlib
import hmac
import inspect
import logging
import mimetypes
import os
import pprint
import random
import re
import sys
import threading
import time
import traceback
import warnings
from os.path import join as opj
from zlib import adler32
import babel.core
from datetime import datetime
import passlib.utils
import psycopg2
import json
import werkzeug.datastructures
import werkzeug.exceptions
import werkzeug.local
import werkzeug.routing
import werkzeug.wrappers
from werkzeug import urls
from werkzeug.wsgi import wrap_file
try:
from werkzeug.middleware.shared_data import SharedDataMiddleware
except ImportError:
from werkzeug.wsgi import SharedDataMiddleware
try:
import psutil
except ImportError:
psutil = None
import odoo
from .service.server import memory_info
from .service import security, model as service_model
from .tools.func import lazy_property
from .tools import profiler
from .tools import ustr, consteq, frozendict, pycompat, unique, date_utils
from .tools.mimetypes import guess_mimetype
from .tools.misc import str2bool
from .tools._vendor import sessions
from .tools._vendor.useragents import UserAgent
from .modules.module import read_manifest
_logger = logging.getLogger(__name__)
rpc_request = logging.getLogger(__name__ + '.rpc.request')
rpc_response = logging.getLogger(__name__ + '.rpc.response')
# One week cache for static content (static files in apps, library files, ...)
# Safe resources may use what google page speed recommends (1 year)
# (attachments with unique hash in the URL, ...)
STATIC_CACHE = 3600 * 24 * 7
STATIC_CACHE_LONG = 3600 * 24 * 365
# To remove when corrected in Babel
babel.core.LOCALE_ALIASES['nb'] = 'nb_NO'
""" Debug mode is stored in session and should always be a string.
It can be activated with a URL query string `debug=<mode>` where
mode is either:
- 'tests' to load tests assets
- 'assets' to load assets non minified
- any other truthy value to enable simple debug mode (to show some
technical features, to show complete tracebacks in frontend errors...)
- any falsy value to disable debug mode
You can use any truthy/falsy value from `str2bool` (eg: 'on', 'f'..)
Multiple debug modes can be activated simultaneously, separated with
a comma (eg: 'tests, assets').
"""
ALLOWED_DEBUG_MODES = ['', '1', 'assets', 'tests']
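# Illustrative sketch (not part of this module's code at this point): how a raw
# ``debug`` query-string value could be normalised against ALLOWED_DEBUG_MODES,
# following the rules described in the docstring above.
def _example_normalize_debug(raw_value):
    modes = []
    for mode in (raw_value or '').split(','):
        mode = mode.strip()
        if mode in ALLOWED_DEBUG_MODES:
            modes.append(mode)
        else:
            # any other truthy value enables plain debug mode, a falsy one disables it
            modes.append('1' if str2bool(mode, default=True) else '')
    return ','.join(modes)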
#----------------------------------------------------------
# RequestHandler
#----------------------------------------------------------
# Thread local global request object
_request_stack = werkzeug.local.LocalStack()
request = _request_stack()
"""
A global proxy that always redirects to the current request object.
"""
def replace_request_password(args):
# password is always 3rd argument in a request, we replace it in RPC logs
# so it's easier to forward logs for diagnostics/debugging purposes...
if len(args) > 2:
args = list(args)
args[2] = '*'
return tuple(args)
# don't trigger debugger for those exceptions, they carry user-facing warnings
# and indications, they're not necessarily indicative of anything being
# *broken*
NO_POSTMORTEM = (odoo.exceptions.AccessDenied,
odoo.exceptions.UserError,
odoo.exceptions.RedirectWarning)
def dispatch_rpc(service_name, method, params):
""" Handle a RPC call.
This is pure Python code, the actual marshalling (from/to XML-RPC) is done
in a upper layer.
"""
try:
rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
rpc_response_flag = rpc_response.isEnabledFor(logging.DEBUG)
if rpc_request_flag or rpc_response_flag:
start_time = time.time()
start_memory = 0
if psutil:
start_memory = memory_info(psutil.Process(os.getpid()))
if rpc_request and rpc_response_flag:
odoo.netsvc.log(rpc_request, logging.DEBUG, '%s.%s' % (service_name, method), replace_request_password(params))
threading.current_thread().uid = None
threading.current_thread().dbname = None
if service_name == 'common':
dispatch = odoo.service.common.dispatch
elif service_name == 'db':
dispatch = odoo.service.db.dispatch
elif service_name == 'object':
dispatch = odoo.service.model.dispatch
result = dispatch(method, params)
if rpc_request_flag or rpc_response_flag:
end_time = time.time()
end_memory = 0
if psutil:
end_memory = memory_info(psutil.Process(os.getpid()))
logline = '%s.%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % (service_name, method, end_time - start_time, start_memory / 1024, end_memory / 1024, (end_memory - start_memory)/1024)
if rpc_response_flag:
odoo.netsvc.log(rpc_response, logging.DEBUG, logline, result)
else:
odoo.netsvc.log(rpc_request, logging.DEBUG, logline, replace_request_password(params), depth=1)
return result
except NO_POSTMORTEM:
raise
except Exception as e:
_logger.exception(odoo.tools.exception_to_unicode(e))
odoo.tools.debugger.post_mortem(odoo.tools.config, sys.exc_info())
raise
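# Illustrative sketch (not part of the original module): dispatch_rpc() is the
# entry point used by the XML-RPC/JSON-RPC layers; two harmless calls that need
# no credentials.
def _example_dispatch_rpc_usage():
    version_info = dispatch_rpc('common', 'version', [])   # server version data
    databases = dispatch_rpc('db', 'list', [])             # available database names
    return version_info, databases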
class WebRequest(object):
""" Parent class for all Odoo Web request types, mostly deals with
initialization and setup of the request object (the dispatching itself has
to be handled by the subclasses)
:param httprequest: a wrapped werkzeug Request object
:type httprequest: :class:`werkzeug.wrappers.BaseRequest`
.. attribute:: httprequest
the original :class:`werkzeug.wrappers.Request` object provided to the
request
.. attribute:: params
:class:`~collections.abc.Mapping` of request parameters, not generally
useful as they're provided directly to the handler method as keyword
arguments
"""
def __init__(self, httprequest):
self.httprequest = httprequest
self.httpresponse = None
self.endpoint = None
self.endpoint_arguments = None
self.auth_method = None
self._db = self.session.db
self._cr = None
self._uid = None
self._context = None
self._env = None
# prevents transaction commit, use when you catch an exception during handling
self._failed = None
# set db/uid trackers - they're cleaned up at the WSGI
# dispatching phase in odoo.service.wsgi_server.application
if self.db:
threading.current_thread().dbname = self.db
if self.session.uid:
threading.current_thread().uid = self.session.uid
@property
def cr(self):
""" :class:`~odoo.sql_db.Cursor` initialized for the current method call.
Accessing the cursor when the current request uses the ``none``
authentication will raise an exception.
"""
# cannot be a lazy_property because _call_function may roll the transaction
# back manually once the cursor has already been set
if not self.db:
raise RuntimeError('request not bound to a database')
if not self._cr:
self._cr = self.registry.cursor()
return self._cr
@property
def uid(self):
return self._uid
@uid.setter
def uid(self, val):
self._uid = val
self._env = None
@property
def context(self):
""" :class:`~collections.abc.Mapping` of context values for the current request """
if self._context is None:
self._context = frozendict(self.session.context)
return self._context
@context.setter
def context(self, val):
self._context = frozendict(val)
self._env = None
@property
def env(self):
""" The :class:`~odoo.api.Environment` bound to current request. """
if self._env is None:
self._env = odoo.api.Environment(self.cr, self.uid, self.context)
return self._env
@lazy_property
def session(self):
""" :class:`OpenERPSession` holding the HTTP session data for the
current http session
"""
return self.httprequest.session
def __enter__(self):
_request_stack.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
_request_stack.pop()
if self._cr:
try:
if exc_type is None and not self._failed:
self._cr.commit()
if self.registry:
self.registry.signal_changes()
elif self.registry:
self.registry.reset_changes()
finally:
self._cr.close()
# just to be sure no one tries to re-use the request
self._db = None
self.uid = None
def set_handler(self, endpoint, arguments, auth):
# is this needed ?
arguments ={k: v for k, v in arguments.items()
if not k.startswith("_ignored_")}
self.endpoint_arguments = arguments
self.endpoint = endpoint
self.auth_method = auth
def _handle_exception(self, exception):
"""Called within an except block to allow converting exceptions
to arbitrary responses. Anything returned (except None) will
be used as response."""
self._failed = exception # prevent tx commit
if not isinstance(exception, NO_POSTMORTEM) \
and not isinstance(exception, werkzeug.exceptions.HTTPException):
odoo.tools.debugger.post_mortem(
odoo.tools.config, sys.exc_info())
# WARNING: do not inline or it breaks: raise...from evaluates strictly
# LTR so would first remove traceback then copy lack of traceback
new_cause = Exception().with_traceback(exception.__traceback__)
new_cause.__cause__ = exception.__cause__ or exception.__context__
# tries to provide good chained tracebacks, just re-raising exception
# generates a weird message as stacks just get concatenated, exceptions
# not guaranteed to copy.copy cleanly & we want `exception` as leaf (for
# callers to check & look at)
raise exception.with_traceback(None) from new_cause
def redirect(self, location, code=303, local=True):
# compatibility: Werkzeug supports URL objects as location
if isinstance(location, urls.URL):
location = location.to_url()
if local:
location = '/' + urls.url_parse(location).replace(scheme='', netloc='').to_url().lstrip('/')
if request and request.db:
return request.registry['ir.http']._redirect(location, code)
return werkzeug.utils.redirect(location, code, Response=Response)
def redirect_query(self, location, query=None, code=303, local=True):
if query:
location += '?' + urls.url_encode(query)
return self.redirect(location, code=code, local=local)
def _is_cors_preflight(self, endpoint):
return False
def _call_function(self, *args, **kwargs):
request = self
if self.endpoint.routing['type'] != self._request_type:
msg = "%s, %s: Function declared as capable of handling request of type '%s' but called with a request of type '%s'"
params = (self.endpoint.original, self.httprequest.path, self.endpoint.routing['type'], self._request_type)
_logger.info(msg, *params)
raise werkzeug.exceptions.BadRequest(msg % params)
if self.endpoint_arguments:
kwargs.update(self.endpoint_arguments)
# Backward for 7.0
if self.endpoint.first_arg_is_req:
args = (request,) + args
first_time = True
# Correct exception handling and concurrency retry
@service_model.check
def checked_call(___dbname, *a, **kw):
nonlocal first_time
            # The decorator can call us more than once if there is a database error. In this
# case, the request cursor is unusable. Rollback transaction to create a new one.
if self._cr and not first_time:
self._cr.rollback()
self.env.clear()
# Rewind files in case of failure
if not first_time:
for filename, file in self.httprequest.files.items():
if hasattr(file, "seekable") and file.seekable():
file.seek(0)
else:
raise RuntimeError("Cannot retry request on input file %r after serialization failure" % filename)
first_time = False
result = self.endpoint(*a, **kw)
if isinstance(result, Response) and result.is_qweb:
# Early rendering of lazy responses to benefit from @service_model.check protection
result.flatten()
if self._cr is not None:
# flush here to avoid triggering a serialization error outside
# of this context, which would not retry the call
self._cr.flush()
return result
if self.db:
return checked_call(self.db, *args, **kwargs)
return self.endpoint(*args, **kwargs)
@contextlib.contextmanager
def registry_cr(self):
warnings.warn('please use request.registry and request.cr directly', DeprecationWarning)
yield (self.registry, self.cr)
@property
def registry(self):
"""
The registry to the database linked to this request. Can be ``None``
if the current request uses the ``none`` authentication.
.. deprecated:: 8.0
use :attr:`.env`
"""
return odoo.registry(self.db)
@property
def db(self):
"""
The database linked to this request. Can be ``None``
if the current request uses the ``none`` authentication.
"""
if not self._db:
self._db = self.session.db
return self._db
def csrf_token(self, time_limit=None):
""" Generates and returns a CSRF token for the current session
:param time_limit: the CSRF token validity period (in seconds), or
``None`` for the token to be valid as long as the
current user session is (the default)
:type time_limit: int | None
:returns: ASCII token string
"""
token = self.session.sid
# if no `time_limit` => distant 1y expiry (31536000) so max_ts acts as salt, e.g. vs BREACH
max_ts = int(time.time() + (time_limit or 31536000))
msg = '%s%s' % (token, max_ts)
secret = self.env['ir.config_parameter'].sudo().get_param('database.secret')
assert secret, "CSRF protection requires a configured database secret"
hm = hmac.new(secret.encode('ascii'), msg.encode('utf-8'), hashlib.sha1).hexdigest()
return '%so%s' % (hm, max_ts)
def validate_csrf(self, csrf):
if not csrf:
return False
try:
hm, _, max_ts = str(csrf).rpartition('o')
except UnicodeEncodeError:
return False
if max_ts:
try:
if int(max_ts) < int(time.time()):
return False
except ValueError:
return False
token = self.session.sid
msg = '%s%s' % (token, max_ts)
secret = self.env['ir.config_parameter'].sudo().get_param('database.secret')
assert secret, "CSRF protection requires a configured database secret"
hm_expected = hmac.new(secret.encode('ascii'), msg.encode('utf-8'), hashlib.sha1).hexdigest()
return consteq(hm, hm_expected)
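# Illustrative sketch (not part of the framework): how a QWeb form would
# typically embed the CSRF token generated above, and how a controller could
# validate it manually for a non-form payload. The route and field names are
# hypothetical.
#
#   <form action="/my/hypothetical/submit" method="post">
#       <input type="hidden" name="csrf_token" t-att-value="request.csrf_token()"/>
#       ...
#   </form>
#
#   if not request.validate_csrf(payload.get('csrf_token')):
#       raise werkzeug.exceptions.BadRequest('Invalid CSRF token')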
def route(route=None, **kw):
"""Decorator marking the decorated method as being a handler for
requests. The method must be part of a subclass of ``Controller``.
:param route: string or array. The route part that will determine which
http requests will match the decorated method. Can be a
single string or an array of strings. See werkzeug's routing
documentation for the format of route expression (
http://werkzeug.pocoo.org/docs/routing/ ).
:param type: The type of request, can be ``'http'`` or ``'json'``.
    :param auth: The type of authentication method, can be one of the following:
* ``user``: The user must be authenticated and the current request
will perform using the rights of the user.
* ``public``: The user may or may not be authenticated. If she isn't,
the current request will perform using the shared Public user.
* ``none``: The method is always active, even if there is no
database. Mainly used by the framework and authentication
                   modules. The request code will not have any facilities to access
                   the database, nor any configuration indicating the current
                   database or the current user.
:param methods: A sequence of http methods this route applies to. If not
specified, all methods are allowed.
:param cors: The Access-Control-Allow-Origin cors directive value.
:param bool csrf: Whether CSRF protection should be enabled for the route.
Defaults to ``True``. See :ref:`CSRF Protection
<csrf>` for more.
.. _csrf:
.. admonition:: CSRF Protection
:class: alert-warning
.. versionadded:: 9.0
Odoo implements token-based `CSRF protection
<https://en.wikipedia.org/wiki/CSRF>`_.
CSRF protection is enabled by default and applies to *UNSAFE*
HTTP methods as defined by :rfc:`7231` (all methods other than
``GET``, ``HEAD``, ``TRACE`` and ``OPTIONS``).
CSRF protection is implemented by checking requests using
unsafe methods for a value called ``csrf_token`` as part of
the request's form data. That value is removed from the form
        as part of the validation and does not have to be taken into
        account by your own form processing.
When adding a new controller for an unsafe method (mostly POST
for e.g. forms):
* if the form is generated in Python, a csrf token is
available via :meth:`request.csrf_token()
          <odoo.http.WebRequest.csrf_token>`, the
:data:`~odoo.http.request` object is available by default
in QWeb (python) templates, it may have to be added
explicitly if you are not using QWeb.
* if the form is generated in Javascript, the CSRF token is
added by default to the QWeb (js) rendering context as
``csrf_token`` and is otherwise available as ``csrf_token``
on the ``web.core`` module:
.. code-block:: javascript
require('web.core').csrf_token
* if the endpoint can be called by external parties (not from
          Odoo), e.g. it is a REST API or a `webhook
<https://en.wikipedia.org/wiki/Webhook>`_, CSRF protection
must be disabled on the endpoint. If possible, you may want
to implement other methods of request validation (to ensure
it is not called by an unrelated third-party).
"""
routing = kw.copy()
assert 'type' not in routing or routing['type'] in ("http", "json")
def decorator(f):
if route:
if isinstance(route, list):
routes = route
else:
routes = [route]
routing['routes'] = routes
wrong = routing.pop('method', None)
if wrong:
kw.setdefault('methods', wrong)
_logger.warning("<function %s.%s> defined with invalid routing parameter 'method', assuming 'methods'", f.__module__, f.__name__)
@functools.wraps(f)
def response_wrap(*args, **kw):
# if controller cannot be called with extra args (utm, debug, ...), call endpoint ignoring them
params = inspect.signature(f).parameters.values()
is_kwargs = lambda p: p.kind == inspect.Parameter.VAR_KEYWORD
if not any(is_kwargs(p) for p in params): # missing **kw
is_keyword_compatible = lambda p: p.kind in (
inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.KEYWORD_ONLY)
fargs = {p.name for p in params if is_keyword_compatible(p)}
ignored = ['<%s=%s>' % (k, kw.pop(k)) for k in list(kw) if k not in fargs]
if ignored:
_logger.info("<function %s.%s> called ignoring args %s" % (f.__module__, f.__name__, ', '.join(ignored)))
response = f(*args, **kw)
if isinstance(response, Response) or f.routing_type == 'json':
return response
if isinstance(response, (bytes, str)):
return Response(response)
if isinstance(response, werkzeug.exceptions.HTTPException):
response = response.get_response(request.httprequest.environ)
if isinstance(response, werkzeug.wrappers.Response):
response = Response.force_type(response)
response.set_default()
return response
_logger.warning("<function %s.%s> returns an invalid response type for an http request" % (f.__module__, f.__name__))
return response
response_wrap.routing = routing
response_wrap.original_func = f
return response_wrap
return decorator
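# Minimal usage sketch of the decorator above (the controller class and URLs
# are hypothetical, assuming the usual `from odoo import http` import):
#
#   class MyController(http.Controller):
#
#       @http.route('/my/hello', type='http', auth='public', methods=['GET'])
#       def hello(self, **kw):
#           return "Hello, world"
#
#       @http.route('/my/sum', type='json', auth='user')
#       def sum(self, a=0, b=0):
#           return {'total': a + b}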
class JsonRequest(WebRequest):
""" Request handler for `JSON-RPC 2
<http://www.jsonrpc.org/specification>`_ over HTTP
* ``method`` is ignored
* ``params`` must be a JSON object (not an array) and is passed as keyword
arguments to the handler method
* the handler method's result is returned as JSON-RPC ``result`` and
wrapped in the `JSON-RPC Response
<http://www.jsonrpc.org/specification#response_object>`_
Successful request::
--> {"jsonrpc": "2.0",
"method": "call",
"params": {"context": {},
"arg1": "val1" },
"id": null}
<-- {"jsonrpc": "2.0",
"result": { "res1": "val1" },
"id": null}
    Request producing an error::
--> {"jsonrpc": "2.0",
"method": "call",
"params": {"context": {},
"arg1": "val1" },
"id": null}
<-- {"jsonrpc": "2.0",
"error": {"code": 1,
"message": "End user error message.",
"data": {"code": "codestring",
"debug": "traceback" } },
"id": null}
"""
_request_type = "json"
def __init__(self, *args):
super(JsonRequest, self).__init__(*args)
self.params = {}
args = self.httprequest.args
request = None
request_id = args.get('id')
# regular jsonrpc2
request = self.httprequest.get_data().decode(self.httprequest.charset)
# Read POST content or POST Form Data named "request"
try:
self.jsonrequest = json.loads(request)
except ValueError:
msg = 'Invalid JSON data: %r' % (request,)
_logger.info('%s: %s', self.httprequest.path, msg)
raise werkzeug.exceptions.BadRequest(msg)
self.params = dict(self.jsonrequest.get("params", {}))
self.context = self.params.pop('context', dict(self.session.context))
def _json_response(self, result=None, error=None):
response = {
'jsonrpc': '2.0',
'id': self.jsonrequest.get('id')
}
if error is not None:
response['error'] = error
if result is not None:
response['result'] = result
mime = 'application/json'
body = json.dumps(response, default=date_utils.json_default)
return Response(
body, status=error and error.pop('http_status', 200) or 200,
headers=[('Content-Type', mime), ('Content-Length', len(body))]
)
def _handle_exception(self, exception):
"""Called within an except block to allow converting exceptions
to arbitrary responses. Anything returned (except None) will
be used as response."""
try:
return super(JsonRequest, self)._handle_exception(exception)
except Exception:
if not isinstance(exception, SessionExpiredException):
if exception.args and exception.args[0] == "bus.Bus not available in test mode":
_logger.info(exception)
elif isinstance(exception, (odoo.exceptions.UserError,
werkzeug.exceptions.NotFound)):
_logger.warning(exception)
else:
_logger.exception("Exception during JSON request handling.")
error = {
'code': 200,
'message': "Odoo Server Error",
'data': serialize_exception(exception),
}
if isinstance(exception, werkzeug.exceptions.NotFound):
error['http_status'] = 404
error['code'] = 404
error['message'] = "404: Not Found"
if isinstance(exception, AuthenticationError):
error['code'] = 100
error['message'] = "Odoo Session Invalid"
if isinstance(exception, SessionExpiredException):
error['code'] = 100
error['message'] = "Odoo Session Expired"
return self._json_response(error=error)
def dispatch(self):
rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
rpc_response_flag = rpc_response.isEnabledFor(logging.DEBUG)
if rpc_request_flag or rpc_response_flag:
endpoint = self.endpoint.method.__name__
model = self.params.get('model')
method = self.params.get('method')
args = self.params.get('args', [])
start_time = time.time()
start_memory = 0
if psutil:
start_memory = memory_info(psutil.Process(os.getpid()))
if rpc_request and rpc_response_flag:
rpc_request.debug('%s: %s %s, %s',
endpoint, model, method, pprint.pformat(args))
result = self._call_function(**self.params)
if rpc_request_flag or rpc_response_flag:
end_time = time.time()
end_memory = 0
if psutil:
end_memory = memory_info(psutil.Process(os.getpid()))
logline = '%s: %s %s: time:%.3fs mem: %sk -> %sk (diff: %sk)' % (
endpoint, model, method, end_time - start_time, start_memory / 1024, end_memory / 1024, (end_memory - start_memory)/1024)
if rpc_response_flag:
rpc_response.debug('%s, %s', logline, pprint.pformat(result))
else:
rpc_request.debug(logline)
return self._json_response(result)
def serialize_exception(e):
return {
"name": type(e).__module__ + "." + type(e).__name__ if type(e).__module__ else type(e).__name__,
"debug": traceback.format_exc(),
"message": ustr(e),
"arguments": e.args,
"context": getattr(e, 'context', {}),
}
class HttpRequest(WebRequest):
""" Handler for the ``http`` request type.
matched routing parameters, query string parameters, form_ parameters
and files are passed to the handler method as keyword arguments.
In case of name conflict, routing parameters have priority.
The handler method's result can be:
* a falsy value, in which case the HTTP response will be an
`HTTP 204`_ (No Content)
* a werkzeug Response object, which is returned as-is
* a ``str`` or ``unicode``, will be wrapped in a Response object and
interpreted as HTML
.. _form: http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4.2
.. _HTTP 204: http://tools.ietf.org/html/rfc7231#section-6.3.5
"""
_request_type = "http"
def __init__(self, *args):
super(HttpRequest, self).__init__(*args)
params = collections.OrderedDict(self.httprequest.args)
params.update(self.httprequest.form)
params.update(self.httprequest.files)
params.pop('session_id', None)
self.params = params
def _handle_exception(self, exception):
"""Called within an except block to allow converting exceptions
        to arbitrary responses. Anything returned (except None) will
be used as response."""
try:
return super(HttpRequest, self)._handle_exception(exception)
except SessionExpiredException:
if not request.params.get('noredirect'):
redirect = request.httprequest.url
query = werkzeug.urls.url_encode({
'redirect': redirect,
})
return request.redirect('/web/login?%s' % query)
except werkzeug.exceptions.HTTPException as e:
return e
def _is_cors_preflight(self, endpoint):
return request.httprequest.method == 'OPTIONS' and endpoint and endpoint.routing.get('cors')
def dispatch(self):
if self._is_cors_preflight(request.endpoint):
headers = {
'Access-Control-Max-Age': 60 * 60 * 24,
'Access-Control-Allow-Headers': 'Origin, X-Requested-With, Content-Type, Accept, Authorization'
}
return Response(status=200, headers=headers)
if request.httprequest.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE') \
and request.endpoint.routing.get('csrf', True): # csrf checked by default
token = self.params.pop('csrf_token', None)
if not self.validate_csrf(token):
if token is not None:
_logger.warning("CSRF validation failed on path '%s'",
request.httprequest.path)
else:
_logger.warning("""No CSRF validation token provided for path '%s'
Odoo URLs are CSRF-protected by default (when accessed with unsafe
HTTP methods). See
https://www.odoo.com/documentation/15.0/developer/reference/addons/http.html#csrf for
more details.
* if this endpoint is accessed through Odoo via a py-QWeb form, embed a CSRF
  token in the form. Tokens are available via `request.csrf_token()` and
  can be provided through a hidden input named `csrf_token` that must be
  POST-ed with the form, e.g. in your form add:
<input type="hidden" name="csrf_token" t-att-value="request.csrf_token()"/>
* if the form is generated or posted in javascript, the token value is
available as `csrf_token` on `web.core` and as the `csrf_token`
value in the default js-qweb execution context
* if the form is accessed by an external third party (e.g. REST API
endpoint, payment gateway callback) you will need to disable CSRF
protection (and implement your own protection if necessary) by
passing the `csrf=False` parameter to the `route` decorator.
""", request.httprequest.path)
raise werkzeug.exceptions.BadRequest('Session expired (invalid CSRF token)')
r = self._call_function(**self.params)
if not r:
r = Response(status=204) # no content
return r
def make_response(self, data, headers=None, cookies=None):
""" Helper for non-HTML responses, or HTML responses with custom
response headers or cookies.
While handlers can just return the HTML markup of a page they want to
        send as a string, if non-HTML data is returned they need to create a
complete response object, or the returned data will not be correctly
interpreted by the clients.
:param basestring data: response body
:param headers: HTTP headers to set on the response
:type headers: ``[(name, value)]``
:param collections.abc.Mapping cookies: cookies to set on the client
"""
response = Response(data, headers=headers)
if cookies:
for k, v in cookies.items():
response.set_cookie(k, v)
return response
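    # Usage sketch for make_response() (hypothetical controller body): return
    # CSV data with an explicit content type and a download header.
    #
    #   data = "id,name\n1,Alice\n"
    #   return request.make_response(
    #       data,
    #       headers=[('Content-Type', 'text/csv'),
    #                ('Content-Disposition', content_disposition('export.csv'))])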
def render(self, template, qcontext=None, lazy=True, **kw):
""" Lazy render of a QWeb template.
        The actual rendering of the given template will occur at the end of
the dispatching. Meanwhile, the template and/or qcontext can be
altered or even replaced by a static response.
:param basestring template: template to render
:param dict qcontext: Rendering context to use
:param bool lazy: whether the template rendering should be deferred
until the last possible moment
:param kw: forwarded to werkzeug's Response object
"""
response = Response(template=template, qcontext=qcontext, **kw)
if not lazy:
return response.render()
return response
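    # Usage sketch for render() (template name and qcontext are hypothetical):
    # the returned Response is lazy, so later code may still adjust its
    # qcontext before the final flatten() happens at the end of dispatching.
    #
    #   response = request.render('my_module.my_template', {'records': records})
    #   response.qcontext['extra'] = 'value'   # still possible, not rendered yet
    #   return response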
def not_found(self, description=None):
""" Shortcut for a `HTTP 404
<http://tools.ietf.org/html/rfc7231#section-6.5.4>`_ (Not Found)
response
"""
return werkzeug.exceptions.NotFound(description)
#----------------------------------------------------------
# Controller and route registration
#----------------------------------------------------------
addons_manifest = {}
controllers_per_module = collections.defaultdict(list)
class ControllerType(type):
def __init__(cls, name, bases, attrs):
super(ControllerType, cls).__init__(name, bases, attrs)
# flag old-style methods with req as first argument
for k, v in attrs.items():
if inspect.isfunction(v) and hasattr(v, 'original_func'):
# Set routing type on original functions
routing_type = v.routing.get('type')
parent = [claz for claz in bases if isinstance(claz, ControllerType) and hasattr(claz, k)]
parent_routing_type = getattr(parent[0], k).original_func.routing_type if parent else routing_type or 'http'
if routing_type is not None and routing_type is not parent_routing_type:
routing_type = parent_routing_type
_logger.warning("Subclass re-defines <function %s.%s.%s> with different type than original."
" Will use original type: %r" % (cls.__module__, cls.__name__, k, parent_routing_type))
v.original_func.routing_type = routing_type or parent_routing_type
sign = inspect.signature(v.original_func)
first_arg = list(sign.parameters)[1] if len(sign.parameters) >= 2 else None
if first_arg in ["req", "request"]:
v._first_arg_is_req = True
# store the controller in the controllers list
name_class = ("%s.%s" % (cls.__module__, cls.__name__), cls)
class_path = name_class[0].split(".")
if not class_path[:2] == ["odoo", "addons"]:
module = ""
else:
# we want to know all modules that have controllers
module = class_path[2]
# but we only store controllers directly inheriting from Controller
if not "Controller" in globals() or not Controller in bases:
return
controllers_per_module[module].append(name_class)
Controller = ControllerType('Controller', (object,), {})
class EndPoint(object):
def __init__(self, method, routing):
self.method = method
self.original = getattr(method, 'original_func', method)
self.routing = frozendict(routing)
self.arguments = {}
@property
def first_arg_is_req(self):
# Backward for 7.0
return getattr(self.method, '_first_arg_is_req', False)
def __call__(self, *args, **kw):
return self.method(*args, **kw)
# werkzeug will use these EndPoint objects as keys of a dictionary
# (the RoutingMap._rules_by_endpoint mapping).
# When Odoo clears the routing map, new EndPoint objects are created,
# most of them with the same values.
# The __eq__ and __hash__ magic methods allow older EndPoint objects
# to be still valid keys of the RoutingMap.
# For example, website._get_canonical_url_localized may use
# such an old endpoint if the routing map was cleared.
def __eq__(self, other):
try:
return self._as_tuple() == other._as_tuple()
except AttributeError:
return False
def __hash__(self):
return hash(self._as_tuple())
def _as_tuple(self):
return (self.original, self.routing)
def __repr__(self):
return '<EndPoint method=%r routing=%r>' % (self.method, self.routing)
def _generate_routing_rules(modules, nodb_only, converters=None):
def get_subclasses(klass):
def valid(c):
return c.__module__.startswith('odoo.addons.') and c.__module__.split(".")[2] in modules
subclasses = klass.__subclasses__()
result = []
for subclass in subclasses:
if valid(subclass):
result.extend(get_subclasses(subclass))
if not result and valid(klass):
result = [klass]
return result
for module in modules:
if module not in controllers_per_module:
continue
for _, cls in controllers_per_module[module]:
subclasses = list(unique(c for c in get_subclasses(cls) if c is not cls))
if subclasses:
name = "%s (extended by %s)" % (cls.__name__, ', '.join(sub.__name__ for sub in subclasses))
cls = type(name, tuple(reversed(subclasses)), {})
o = cls()
members = inspect.getmembers(o, inspect.ismethod)
for _, mv in members:
if hasattr(mv, 'routing'):
routing = dict(type='http', auth='user', methods=None, routes=None)
methods_done = list()
# update routing attributes from subclasses(auth, methods...)
for claz in reversed(mv.__self__.__class__.mro()):
fn = getattr(claz, mv.__name__, None)
if fn and hasattr(fn, 'routing') and fn not in methods_done:
methods_done.append(fn)
routing.update(fn.routing)
if not nodb_only or routing['auth'] == "none":
                        assert routing['routes'], "Method %r has no route defined" % mv
endpoint = EndPoint(mv, routing)
for url in routing['routes']:
yield (url, endpoint, routing)
#----------------------------------------------------------
# HTTP Sessions
#----------------------------------------------------------
class AuthenticationError(Exception):
pass
class SessionExpiredException(Exception):
pass
class OpenERPSession(sessions.Session):
def __init__(self, *args, **kwargs):
self.inited = False
self.modified = False
self.rotate = False
super(OpenERPSession, self).__init__(*args, **kwargs)
self.inited = True
self._default_values()
self.modified = False
def __getattr__(self, attr):
return self.get(attr, None)
def __setattr__(self, k, v):
if getattr(self, "inited", False):
try:
object.__getattribute__(self, k)
except:
return self.__setitem__(k, v)
object.__setattr__(self, k, v)
def authenticate(self, db, login=None, password=None):
"""
Authenticate the current user with the given db, login and
password. If successful, store the authentication parameters in the
current session and request, unless multi-factor-authentication
is activated. In that case, that last part will be done by
:ref:`finalize`.
"""
wsgienv = request.httprequest.environ
env = dict(
interactive=True,
base_location=request.httprequest.url_root.rstrip('/'),
HTTP_HOST=wsgienv['HTTP_HOST'],
REMOTE_ADDR=wsgienv['REMOTE_ADDR'],
)
uid = odoo.registry(db)['res.users'].authenticate(db, login, password, env)
self.pre_uid = uid
self.rotate = True
self.db = db
self.login = login
user = request.env(user=uid)['res.users'].browse(uid)
if not user._mfa_url():
self.finalize()
return uid
def finalize(self):
""" Finalizes a partial session, should be called on MFA validation to
convert a partial / pre-session into a full-fledged "logged-in" one
"""
self.rotate = True
request.uid = self.uid = self.pop('pre_uid')
user = request.env(user=self.uid)['res.users'].browse(self.uid)
self.session_token = user._compute_session_token(self.sid)
self.get_context()
def check_security(self):
"""
Check the current authentication parameters to know if those are still
valid. This method should be called at each request. If the
authentication fails, a :exc:`SessionExpiredException` is raised.
"""
if not self.db or not self.uid:
raise SessionExpiredException("Session expired")
# We create our own environment instead of the request's one.
# to avoid creating it without the uid since request.uid isn't set yet
env = odoo.api.Environment(request.cr, self.uid, self.context)
# here we check if the session is still valid
if not security.check_session(self, env):
raise SessionExpiredException("Session expired")
def logout(self, keep_db=False):
for k in list(self):
if not (keep_db and k == 'db') and k != 'debug':
del self[k]
self._default_values()
self.rotate = True
def _default_values(self):
self.setdefault("db", None)
self.setdefault("uid", None)
self.setdefault("login", None)
self.setdefault("session_token", None)
self.setdefault("context", {})
self.setdefault("debug", '')
def get_context(self):
"""
Re-initializes the current user's session context (based on his
preferences) by calling res.users.get_context() with the old context.
:returns: the new context
"""
assert self.uid, "The user needs to be logged-in to initialize his context"
self.context = dict(request.env['res.users'].context_get() or {})
self.context['uid'] = self.uid
self._fix_lang(self.context)
return self.context
def _fix_lang(self, context):
""" OpenERP provides languages which may not make sense and/or may not
be understood by the web client's libraries.
Fix those here.
:param dict context: context to fix
"""
lang = context.get('lang')
# inane OpenERP locale
if lang == 'ar_AR':
lang = 'ar'
# lang to lang_REGION (datejs only handles lang_REGION, no bare langs)
if lang in babel.core.LOCALE_ALIASES:
lang = babel.core.LOCALE_ALIASES[lang]
context['lang'] = lang or 'en_US'
def save_action(self, action):
"""
        This method stores an action object in the session and returns an integer
identifying that action. The method get_action() can be used to get
back the action.
        :param action: The action to save in the session.
        :type action: anything
:return: A key identifying the saved action.
:rtype: integer
"""
saved_actions = self.setdefault('saved_actions', {"next": 1, "actions": {}})
# we don't allow more than 10 stored actions
if len(saved_actions["actions"]) >= 10:
del saved_actions["actions"][min(saved_actions["actions"])]
key = saved_actions["next"]
saved_actions["actions"][key] = action
saved_actions["next"] = key + 1
self.modified = True
return key
def get_action(self, key):
"""
Gets back a previously saved action. This method can return None if the action
        was saved too long ago (this case should be handled in a smart way).
:param key: The key given by save_action()
:type key: integer
:return: The saved action or None.
:rtype: anything
"""
saved_actions = self.get('saved_actions', {})
return saved_actions.get("actions", {}).get(key)
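# Usage sketch for the save_action()/get_action() pair above (the action dict
# is hypothetical): typically used to pass a full action description through a
# redirect without serializing it in the URL.
#
#   key = request.session.save_action({'type': 'ir.actions.act_window',
#                                      'res_model': 'res.partner'})
#   ...
#   action = request.session.get_action(key)   # may be None if evicted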
def session_gc(session_store):
if random.random() < 0.001:
        # we keep sessions for one week
last_week = time.time() - 60*60*24*7
for fname in os.listdir(session_store.path):
path = os.path.join(session_store.path, fname)
try:
if os.path.getmtime(path) < last_week:
os.unlink(path)
except OSError:
pass
ODOO_DISABLE_SESSION_GC = str2bool(os.environ.get('ODOO_DISABLE_SESSION_GC', '0'))
if ODOO_DISABLE_SESSION_GC:
# empty function, in case another module would be
# calling it out of setup_session()
session_gc = lambda s: None
#----------------------------------------------------------
# WSGI Layer
#----------------------------------------------------------
# Add potentially missing (older ubuntu) font mime types
mimetypes.add_type('application/font-woff', '.woff')
mimetypes.add_type('application/vnd.ms-fontobject', '.eot')
mimetypes.add_type('application/x-font-ttf', '.ttf')
# Add potentially missing (detected on windows) svg mime types
mimetypes.add_type('image/svg+xml', '.svg')
class Response(werkzeug.wrappers.Response):
""" Response object passed through controller route chain.
In addition to the :class:`werkzeug.wrappers.Response` parameters, this
class's constructor can take the following additional parameters
for QWeb Lazy Rendering.
:param basestring template: template to render
:param dict qcontext: Rendering context to use
:param int uid: User id to use for the ir.ui.view render call,
``None`` to use the request's user (the default)
these attributes are available as parameters on the Response object and
can be altered at any time before rendering
Also exposes all the attributes and methods of
:class:`werkzeug.wrappers.Response`.
"""
default_mimetype = 'text/html'
def __init__(self, *args, **kw):
template = kw.pop('template', None)
qcontext = kw.pop('qcontext', None)
uid = kw.pop('uid', None)
super(Response, self).__init__(*args, **kw)
self.set_default(template, qcontext, uid)
def set_default(self, template=None, qcontext=None, uid=None):
self.template = template
self.qcontext = qcontext or dict()
self.qcontext['response_template'] = self.template
self.uid = uid
# Support for Cross-Origin Resource Sharing
if request.endpoint and 'cors' in request.endpoint.routing:
self.headers.set('Access-Control-Allow-Origin', request.endpoint.routing['cors'])
methods = 'GET, POST'
if request.endpoint.routing['type'] == 'json':
methods = 'POST'
elif request.endpoint.routing.get('methods'):
methods = ', '.join(request.endpoint.routing['methods'])
self.headers.set('Access-Control-Allow-Methods', methods)
@property
def is_qweb(self):
return self.template is not None
def render(self):
""" Renders the Response's template, returns the result
"""
env = request.env(user=self.uid or request.uid or odoo.SUPERUSER_ID)
self.qcontext['request'] = request
return env["ir.ui.view"]._render_template(self.template, self.qcontext)
def flatten(self):
""" Forces the rendering of the response's template, sets the result
as response body and unsets :attr:`.template`
"""
if self.template:
self.response.append(self.render())
self.template = None
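# Sketch of the lazy-rendering flow implemented by Response above (template
# name is hypothetical, and this only works inside a request): a controller
# returns a template-bound Response, and the body is only produced when
# flatten() runs at the end of dispatching.
#
#   resp = Response(template='my_module.page', qcontext={'title': 'Hi'})
#   assert resp.is_qweb          # no body yet
#   resp.flatten()               # renders the template and fills the body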
class DisableCacheMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
def start_wrapped(status, headers):
req = werkzeug.wrappers.Request(environ)
root.setup_session(req)
            if req.session and req.session.debug and 'wkhtmltopdf' not in req.headers.get('User-Agent', ''):
if "assets" in req.session.debug and (".js" in req.base_url or ".css" in req.base_url):
new_cache_control = 'no-store'
else:
new_cache_control = 'no-cache'
cache_control_value = new_cache_control
new_headers = []
for k, v in headers:
if k.lower() != 'cache-control':
new_headers.append((k, v))
elif new_cache_control not in v:
cache_control_value += ', %s' % v
new_headers.append(('Cache-Control', cache_control_value))
start_response(status, new_headers)
else:
start_response(status, headers)
return self.app(environ, start_wrapped)
class Root(object):
"""Root WSGI application for the OpenERP Web Client.
"""
def __init__(self):
self._loaded = False
@lazy_property
def session_store(self):
# Setup http sessions
path = odoo.tools.config.session_dir
_logger.debug('HTTP sessions stored in: %s', path)
if ODOO_DISABLE_SESSION_GC:
_logger.info('Default session GC disabled, manual GC required.')
return sessions.FilesystemSessionStore(
path, session_class=OpenERPSession, renew_missing=True)
@lazy_property
def nodb_routing_map(self):
_logger.info("Generating nondb routing")
routing_map = werkzeug.routing.Map(strict_slashes=False, converters=None)
for url, endpoint, routing in odoo.http._generate_routing_rules([''] + odoo.conf.server_wide_modules, True):
rule = werkzeug.routing.Rule(url, endpoint=endpoint, methods=routing['methods'])
rule.merge_slashes = False
routing_map.add(rule)
return routing_map
def __call__(self, environ, start_response):
""" Handle a WSGI request
"""
if not self._loaded:
self._loaded = True
self.load_addons()
return self.dispatch(environ, start_response)
def load_addons(self):
""" Load all addons from addons path containing static files and
controllers and configure them. """
# TODO should we move this to ir.http so that only configured modules are served ?
statics = {}
manifests = addons_manifest
for addons_path in odoo.addons.__path__:
for module in sorted(os.listdir(str(addons_path))):
if module not in manifests:
# Deal with the manifest first
mod_path = opj(addons_path, module)
manifest = read_manifest(addons_path, module)
if not manifest or (not manifest.get('installable', True) and 'assets' not in manifest):
continue
manifest['addons_path'] = addons_path
manifests[module] = manifest
# Then deal with the statics
path_static = opj(addons_path, module, 'static')
if os.path.isdir(path_static):
_logger.debug("Loading %s", module)
statics['/%s/static' % module] = path_static
if statics:
_logger.info("HTTP Configuring static files")
app = SharedDataMiddleware(self.dispatch, statics, cache_timeout=STATIC_CACHE)
self.dispatch = DisableCacheMiddleware(app)
def setup_session(self, httprequest):
# recover or create session
session_gc(self.session_store)
sid = httprequest.args.get('session_id')
explicit_session = True
if not sid:
sid = httprequest.headers.get("X-Openerp-Session-Id")
if not sid:
sid = httprequest.cookies.get('session_id')
explicit_session = False
if sid is None:
httprequest.session = self.session_store.new()
else:
httprequest.session = self.session_store.get(sid)
return explicit_session
def setup_db(self, httprequest):
db = httprequest.session.db
# Check if session.db is legit
if db:
if db not in db_filter([db], httprequest=httprequest):
_logger.warning("Logged into database '%s', but dbfilter "
"rejects it; logging session out.", db)
httprequest.session.logout()
db = None
if not db:
httprequest.session.db = db_monodb(httprequest)
def setup_lang(self, httprequest):
if "lang" not in httprequest.session.context:
alang = httprequest.accept_languages.best or "en-US"
try:
code, territory, _, _ = babel.core.parse_locale(alang, sep='-')
if territory:
lang = '%s_%s' % (code, territory)
else:
lang = babel.core.LOCALE_ALIASES[code]
except (ValueError, KeyError):
lang = 'en_US'
httprequest.session.context["lang"] = lang
def get_request(self, httprequest):
# deduce type of request
if httprequest.mimetype in ("application/json", "application/json-rpc"):
return JsonRequest(httprequest)
else:
return HttpRequest(httprequest)
def get_response(self, httprequest, result, explicit_session):
if isinstance(result, Response) and result.is_qweb:
try:
result.flatten()
except Exception as e:
if request.db:
result = request.registry['ir.http']._handle_exception(e)
else:
raise
if isinstance(result, (bytes, str)):
response = Response(result, mimetype='text/html')
else:
response = result
self.set_csp(response)
save_session = (not request.endpoint) or request.endpoint.routing.get('save_session', True)
if not save_session:
return response
if httprequest.session.should_save:
if httprequest.session.rotate:
self.session_store.delete(httprequest.session)
httprequest.session.sid = self.session_store.generate_key()
if httprequest.session.uid:
httprequest.session.session_token = security.compute_session_token(httprequest.session, request.env)
httprequest.session.modified = True
self.session_store.save(httprequest.session)
# We must not set the cookie if the session id was specified using a http header or a GET parameter.
        # There are two reasons for this:
# - When using one of those two means we consider that we are overriding the cookie, which means creating a new
# session on top of an already existing session and we don't want to create a mess with the 'normal' session
# (the one using the cookie). That is a special feature of the Session Javascript class.
# - It could allow session fixation attacks.
if not explicit_session and hasattr(response, 'set_cookie'):
response.set_cookie(
'session_id', httprequest.session.sid, max_age=90 * 24 * 60 * 60, httponly=True)
return response
def set_csp(self, response):
# ignore HTTP errors
if not isinstance(response, werkzeug.wrappers.Response):
return
headers = response.headers
if 'Content-Security-Policy' in headers:
return
mime, _params = cgi.parse_header(headers.get('Content-Type', ''))
if not mime.startswith('image/'):
return
headers['Content-Security-Policy'] = "default-src 'none'"
def dispatch(self, environ, start_response):
"""
Performs the actual WSGI dispatching for the application.
"""
try:
httprequest = werkzeug.wrappers.Request(environ)
httprequest.user_agent_class = UserAgent # use vendored userAgent since it will be removed in 2.1
httprequest.parameter_storage_class = werkzeug.datastructures.ImmutableOrderedMultiDict
current_thread = threading.current_thread()
current_thread.url = httprequest.url
current_thread.query_count = 0
current_thread.query_time = 0
current_thread.perf_t0 = time.time()
explicit_session = self.setup_session(httprequest)
self.setup_db(httprequest)
self.setup_lang(httprequest)
request = self.get_request(httprequest)
def _dispatch_nodb():
try:
func, arguments = self.nodb_routing_map.bind_to_environ(request.httprequest.environ).match()
except werkzeug.exceptions.HTTPException as e:
return request._handle_exception(e)
request.set_handler(func, arguments, "none")
try:
result = request.dispatch()
except Exception as e:
return request._handle_exception(e)
return result
request_manager = request
if request.session.profile_session:
request_manager = self.get_profiler_context_manager(request)
with request_manager:
db = request.session.db
if db:
try:
odoo.registry(db).check_signaling()
with odoo.tools.mute_logger('odoo.sql_db'):
ir_http = request.registry['ir.http']
except (AttributeError, psycopg2.OperationalError, psycopg2.ProgrammingError):
# psycopg2 error or attribute error while constructing
# the registry. That means either
                        # - the database probably does not exist anymore
# - the database is corrupted
# - the database version doesn't match the server version
# Log the user out and fall back to nodb
request.session.logout()
if request.httprequest.path == '/web':
# Internal Server Error
raise
else:
# If requesting /web this will loop
result = _dispatch_nodb()
else:
result = ir_http._dispatch()
else:
result = _dispatch_nodb()
response = self.get_response(httprequest, result, explicit_session)
return response(environ, start_response)
except werkzeug.exceptions.HTTPException as e:
return e(environ, start_response)
def get_profiler_context_manager(self, request):
""" Return a context manager that combines a profiler and ``request``. """
if request.session.profile_session and request.session.db:
if request.session.profile_expiration < str(datetime.now()):
# avoid having session profiling for too long if user forgets to disable profiling
request.session.profile_session = None
_logger.warning("Profiling expiration reached, disabling profiling")
elif 'set_profiling' in request.httprequest.path:
_logger.debug("Profiling disabled on set_profiling route")
elif request.httprequest.path.startswith('/longpolling'):
_logger.debug("Profiling disabled for longpolling")
elif odoo.evented:
# only longpolling should be in a evented server, but this is an additional safety
_logger.debug("Profiling disabled for evented server")
else:
try:
prof = profiler.Profiler(
db=request.session.db,
description=request.httprequest.full_path,
profile_session=request.session.profile_session,
collectors=request.session.profile_collectors,
params=request.session.profile_params,
)
return profiler.Nested(prof, request)
except Exception:
_logger.exception("Failure during Profiler creation")
request.session.profile_session = None
return request
def get_db_router(self, db):
if not db:
return self.nodb_routing_map
return request.registry['ir.http'].routing_map()
def db_list(force=False, httprequest=None):
try:
dbs = odoo.service.db.list_dbs(force)
except psycopg2.OperationalError:
return []
return db_filter(dbs, httprequest=httprequest)
def db_filter(dbs, httprequest=None):
httprequest = httprequest or request.httprequest
h = httprequest.environ.get('HTTP_HOST', '').split(':')[0]
d, _, r = h.partition('.')
if d == "www" and r:
d = r.partition('.')[0]
if odoo.tools.config['dbfilter']:
d, h = re.escape(d), re.escape(h)
r = odoo.tools.config['dbfilter'].replace('%h', h).replace('%d', d)
dbs = [i for i in dbs if re.match(r, i)]
elif odoo.tools.config['db_name']:
# In case --db-filter is not provided and --database is passed, Odoo will
# use the value of --database as a comma separated list of exposed databases.
exposed_dbs = set(db.strip() for db in odoo.tools.config['db_name'].split(','))
dbs = sorted(exposed_dbs.intersection(dbs))
return dbs
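# Sketch of how the dbfilter substitution above behaves (hostnames and
# database names are made up): with --db-filter '^%d_.*$', a request on
# erp.example.com only exposes databases starting with 'erp_', because %d is
# replaced by the first subdomain ('www' being skipped) and %h by the full
# hostname.
#
#   dbs = db_filter(['erp_prod', 'erp_test', 'other'], httprequest=req)
#   # -> ['erp_prod', 'erp_test'] when HTTP_HOST is 'erp.example.com'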
def db_monodb(httprequest=None):
"""
Magic function to find the current database.
Implementation details:
* Magic
* More magic
Returns ``None`` if the magic is not magic enough.
"""
httprequest = httprequest or request.httprequest
dbs = db_list(True, httprequest)
# try the db already in the session
db_session = httprequest.session.db
if db_session in dbs:
return db_session
# if there is only one possible db, we take that one
if len(dbs) == 1:
return dbs[0]
return None
def send_file(filepath_or_fp, mimetype=None, as_attachment=False, filename=None, mtime=None,
add_etags=True, cache_timeout=STATIC_CACHE, conditional=True):
"""This is a modified version of Flask's send_file()
Sends the contents of a file to the client. This will use the
most efficient method available and configured. By default it will
try to use the WSGI server's file_wrapper support.
By default it will try to guess the mimetype for you, but you can
also explicitly provide one. For extra security you probably want
to send certain files as attachment (HTML for instance). The mimetype
    guessing requires a `filename` to be provided.
Please never pass filenames to this function from user sources without
checking them first.
:param filepath_or_fp: the filename of the file to send.
Alternatively a file object might be provided
in which case `X-Sendfile` might not work and
fall back to the traditional method. Make sure
that the file pointer is positioned at the start
of data to send before calling :func:`send_file`.
:param mimetype: the mimetype of the file if provided, otherwise
auto detection happens.
:param as_attachment: set to `True` if you want to send this file with
a ``Content-Disposition: attachment`` header.
:param filename: the filename for the attachment if it differs from the file's filename or
if using file object without 'name' attribute (eg: E-tags with StringIO).
    :param mtime: last modification time to use for conditional response.
:param add_etags: set to `False` to disable attaching of etags.
:param conditional: set to `False` to disable conditional responses.
:param cache_timeout: the timeout in seconds for the headers.
"""
if isinstance(filepath_or_fp, str):
if not filename:
filename = os.path.basename(filepath_or_fp)
file = open(filepath_or_fp, 'rb')
if not mtime:
mtime = os.path.getmtime(filepath_or_fp)
else:
file = filepath_or_fp
if not filename:
filename = getattr(file, 'name', None)
file.seek(0, 2)
size = file.tell()
file.seek(0)
if mimetype is None and filename:
mimetype = mimetypes.guess_type(filename)[0]
if mimetype is None:
mimetype = 'application/octet-stream'
headers = werkzeug.datastructures.Headers()
if as_attachment:
if filename is None:
raise TypeError('filename unavailable, required for sending as attachment')
headers.add('Content-Disposition', 'attachment', filename=filename)
headers['Content-Length'] = size
data = wrap_file(request.httprequest.environ, file)
rv = Response(data, mimetype=mimetype, headers=headers,
direct_passthrough=True)
if isinstance(mtime, str):
try:
server_format = odoo.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
mtime = datetime.strptime(mtime.split('.')[0], server_format)
except Exception:
mtime = None
if mtime is not None:
rv.last_modified = mtime
rv.cache_control.public = True
if cache_timeout:
rv.cache_control.max_age = cache_timeout
rv.expires = int(time.time() + cache_timeout)
if add_etags and filename and mtime:
rv.set_etag('odoo-%s-%s-%s' % (
mtime,
size,
adler32(
filename.encode('utf-8') if isinstance(filename, str)
else filename
) & 0xffffffff
))
if conditional:
rv = rv.make_conditional(request.httprequest)
# make sure we don't send x-sendfile for servers that
# ignore the 304 status code for x-sendfile.
if rv.status_code == 304:
rv.headers.pop('x-sendfile', None)
return rv
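# Usage sketch for send_file() (the path is hypothetical): stream a file from
# disk as a download, letting the helper fill in mimetype, ETag and caching
# headers.
#
#   return send_file('/tmp/report.pdf', as_attachment=True,
#                    filename='report.pdf', mimetype='application/pdf')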
def content_disposition(filename):
filename = odoo.tools.ustr(filename)
escaped = urls.url_quote(filename, safe='')
return "attachment; filename*=UTF-8''%s" % escaped
def set_safe_image_headers(headers, content):
"""Return new headers based on `headers` but with `Content-Length` and
`Content-Type` set appropriately depending on the given `content` only if it
is safe to do, as well as `X-Content-Type-Options: nosniff` so that if the
file is of an unsafe type, it is not interpreted as that type if the
`Content-type` header was already set to a different mimetype
"""
headers = werkzeug.datastructures.Headers(headers)
safe_types = {'image/jpeg', 'image/png', 'image/gif', 'image/x-icon'}
content_type = guess_mimetype(content)
if content_type in safe_types:
headers['Content-Type'] = content_type
headers['X-Content-Type-Options'] = 'nosniff'
headers['Content-Length'] = len(content)
return list(headers)
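# Sketch of set_safe_image_headers() (the content bytes are made up): only the
# whitelisted image mimetypes may override Content-Type; anything else keeps
# its original type and only gets the nosniff/length headers.
#
#   headers = set_safe_image_headers([('Content-Type', 'image/png')], png_bytes)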
# main wsgi handler
root = Root()
| 40.092128 | 68,758 |
29,251 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
The PostgreSQL connector is a connectivity layer between the OpenERP code and
the database, *not* a database abstraction toolkit. Database abstraction is what
the ORM does, in fact.
"""
from contextlib import contextmanager
from functools import wraps
import itertools
import logging
import time
import uuid
import warnings
from decorator import decorator
import psycopg2
import psycopg2.extras
import psycopg2.extensions
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, ISOLATION_LEVEL_READ_COMMITTED, ISOLATION_LEVEL_REPEATABLE_READ
from psycopg2.pool import PoolError
from werkzeug import urls
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
_logger = logging.getLogger(__name__)
real_time = time.time.__call__ # ensure we have a non patched time for query times when using freezegun
def unbuffer(symb, cr):
if symb is None:
return None
return str(symb)
def undecimalize(symb, cr):
if symb is None:
return None
return float(symb)
psycopg2.extensions.register_type(psycopg2.extensions.new_type((700, 701, 1700,), 'float', undecimalize))
from . import tools
from .tools.func import frame_codeinfo
from .tools import parse_version as pv
if pv(psycopg2.__version__) < pv('2.7'):
from psycopg2._psycopg import QuotedString
def adapt_string(adapted):
"""Python implementation of psycopg/psycopg2#459 from v2.7"""
if '\x00' in adapted:
raise ValueError("A string literal cannot contain NUL (0x00) characters.")
return QuotedString(adapted)
psycopg2.extensions.register_adapter(str, adapt_string)
from datetime import timedelta
import threading
from inspect import currentframe
def flush_env(cr, *, clear=True):
warnings.warn("Since Odoo 15.0, use cr.flush() instead of flush_env(cr).",
DeprecationWarning, stacklevel=2)
cr.flush()
if clear:
cr.clear()
def clear_env(cr):
warnings.warn("Since Odoo 15.0, use cr.clear() instead of clear_env(cr).",
DeprecationWarning, stacklevel=2)
cr.clear()
import re
re_from = re.compile('.* from "?([a-zA-Z_0-9]+)"? .*$')
re_into = re.compile('.* into "?([a-zA-Z_0-9]+)"? .*$')
sql_counter = 0
@decorator
def check(f, self, *args, **kwargs):
""" Wrap a cursor method that cannot be called when the cursor is closed. """
if self._closed:
raise psycopg2.OperationalError('Unable to use a closed cursor.')
return f(self, *args, **kwargs)
class BaseCursor:
""" Base class for cursors that manage pre/post commit hooks. """
def __init__(self):
self.precommit = tools.Callbacks()
self.postcommit = tools.Callbacks()
self.prerollback = tools.Callbacks()
self.postrollback = tools.Callbacks()
# By default a cursor has no transaction object. A transaction object
# for managing environments is instantiated by registry.cursor(). It
# is not done here in order to avoid cyclic module dependencies.
self.transaction = None
def flush(self):
""" Flush the current transaction, and run precommit hooks. """
if self.transaction is not None:
self.transaction.flush()
self.precommit.run()
def clear(self):
""" Clear the current transaction, and clear precommit hooks. """
if self.transaction is not None:
self.transaction.clear()
self.precommit.clear()
def reset(self):
""" Reset the current transaction (this invalidates more that clear()).
This method should be called only right after commit() or rollback().
"""
if self.transaction is not None:
self.transaction.reset()
@contextmanager
@check
def savepoint(self, flush=True):
"""context manager entering in a new savepoint"""
name = uuid.uuid1().hex
if flush:
self.flush()
self.execute('SAVEPOINT "%s"' % name)
try:
yield
if flush:
self.flush()
except Exception:
if flush:
self.clear()
self.execute('ROLLBACK TO SAVEPOINT "%s"' % name)
raise
else:
self.execute('RELEASE SAVEPOINT "%s"' % name)
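    # Usage sketch for savepoint() (table and values are hypothetical): errors
    # raised inside the block roll back to the savepoint only, not the whole
    # transaction.
    #
    #   try:
    #       with cr.savepoint():
    #           cr.execute("INSERT INTO my_table (name) VALUES (%s)", ('x',))
    #   except psycopg2.Error:
    #       pass   # the rest of the transaction is still usable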
def __enter__(self):
""" Using the cursor as a contextmanager automatically commits and
closes it::
with cr:
cr.execute(...)
# cr is committed if no failure occurred
# cr is closed in any case
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
if exc_type is None:
self.commit()
finally:
self.close()
class Cursor(BaseCursor):
"""Represents an open transaction to the PostgreSQL DB backend,
acting as a lightweight wrapper around psycopg2's
``cursor`` objects.
``Cursor`` is the object behind the ``cr`` variable used all
over the OpenERP code.
.. rubric:: Transaction Isolation
One very important property of database transactions is the
level of isolation between concurrent transactions.
The SQL standard defines four levels of transaction isolation,
ranging from the most strict *Serializable* level, to the least
strict *Read Uncommitted* level. These levels are defined in
terms of the phenomena that must not occur between concurrent
transactions, such as *dirty read*, etc.
In the context of a generic business data management software
such as OpenERP, we need the best guarantees that no data
    corruption can ever be caused by simply running multiple
transactions in parallel. Therefore, the preferred level would
be the *serializable* level, which ensures that a set of
transactions is guaranteed to produce the same effect as
running them one at a time in some order.
However, most database management systems implement a limited
serializable isolation in the form of
`snapshot isolation <http://en.wikipedia.org/wiki/Snapshot_isolation>`_,
providing most of the same advantages as True Serializability,
with a fraction of the performance cost.
With PostgreSQL up to version 9.0, this snapshot isolation was
the implementation of both the ``REPEATABLE READ`` and
``SERIALIZABLE`` levels of the SQL standard.
As of PostgreSQL 9.1, the previous snapshot isolation implementation
was kept for ``REPEATABLE READ``, while a new ``SERIALIZABLE``
level was introduced, providing some additional heuristics to
detect a concurrent update by parallel transactions, and forcing
one of them to rollback.
OpenERP implements its own level of locking protection
for transactions that are highly likely to provoke concurrent
updates, such as stock reservations or document sequences updates.
Therefore we mostly care about the properties of snapshot isolation,
but we don't really need additional heuristics to trigger transaction
rollbacks, as we are taking care of triggering instant rollbacks
ourselves when it matters (and we can save the additional performance
hit of these heuristics).
As a result of the above, we have selected ``REPEATABLE READ`` as
the default transaction isolation level for OpenERP cursors, as
it will be mapped to the desired ``snapshot isolation`` level for
all supported PostgreSQL version (8.3 - 9.x).
Note: up to psycopg2 v.2.4.2, psycopg2 itself remapped the repeatable
read level to serializable before sending it to the database, so it would
actually select the new serializable mode on PostgreSQL 9.1. Make
sure you use psycopg2 v2.4.2 or newer if you use PostgreSQL 9.1 and
the performance hit is a concern for you.
.. attribute:: cache
Cache dictionary with a "request" (-ish) lifecycle, only lives as
long as the cursor itself does and proactively cleared when the
cursor is closed.
This cache should *only* be used to store repeatable reads as it
ignores rollbacks and savepoints, it should not be used to store
*any* data which may be modified during the life of the cursor.
"""
IN_MAX = 1000 # decent limit on size of IN queries - guideline = Oracle limit
def __init__(self, pool, dbname, dsn, serialized=True):
super().__init__()
self.sql_from_log = {}
self.sql_into_log = {}
# default log level determined at cursor creation, could be
# overridden later for debugging purposes
self.sql_log = _logger.isEnabledFor(logging.DEBUG)
self.sql_log_count = 0
# avoid the call of close() (by __del__) if an exception
# is raised by any of the following initialisations
self._closed = True
self.__pool = pool
self.dbname = dbname
# Whether to enable snapshot isolation level for this cursor.
# see also the docstring of Cursor.
self._serialized = serialized
self._cnx = pool.borrow(dsn)
self._obj = self._cnx.cursor()
if self.sql_log:
self.__caller = frame_codeinfo(currentframe(), 2)
else:
self.__caller = False
self._closed = False # real initialisation value
self.autocommit(False)
self._default_log_exceptions = True
self.cache = {}
self._now = None
def __build_dict(self, row):
return {d.name: row[i] for i, d in enumerate(self._obj.description)}
def dictfetchone(self):
row = self._obj.fetchone()
return row and self.__build_dict(row)
def dictfetchmany(self, size):
return [self.__build_dict(row) for row in self._obj.fetchmany(size)]
def dictfetchall(self):
return [self.__build_dict(row) for row in self._obj.fetchall()]
def __del__(self):
if not self._closed and not self._cnx.closed:
# Oops. 'self' has not been closed explicitly.
# The cursor will be deleted by the garbage collector,
# but the database connection is not put back into the connection
# pool, preventing some operation on the database like dropping it.
# This can also lead to a server overload.
msg = "Cursor not closed explicitly\n"
if self.__caller:
msg += "Cursor was created at %s:%s" % self.__caller
else:
msg += "Please enable sql debugging to trace the caller."
_logger.warning(msg)
self._close(True)
def _format(self, query, params=None):
encoding = psycopg2.extensions.encodings[self.connection.encoding]
return self._obj.mogrify(query, params).decode(encoding, 'replace')
@check
def execute(self, query, params=None, log_exceptions=None):
if params and not isinstance(params, (tuple, list, dict)):
# psycopg2's TypeError is not clear if you mess up the params
raise ValueError("SQL query parameters should be a tuple, list or dict; got %r" % (params,))
start = real_time()
try:
params = params or None
res = self._obj.execute(query, params)
except Exception as e:
if self._default_log_exceptions if log_exceptions is None else log_exceptions:
_logger.error("bad query: %s\nERROR: %s", tools.ustr(self._obj.query or query), e)
raise
finally:
delay = real_time() - start
if self.sql_log:
_logger.debug("[%.3f ms] query: %s", 1000 * delay, self._format(query, params))
# simple query count is always computed
self.sql_log_count += 1
current_thread = threading.current_thread()
if hasattr(current_thread, 'query_count'):
current_thread.query_count += 1
current_thread.query_time += delay
# optional hooks for performance and tracing analysis
for hook in getattr(current_thread, 'query_hooks', ()):
hook(self, query, params, start, delay)
# advanced stats only if sql_log is enabled
if self.sql_log:
delay *= 1E6
query_lower = self._obj.query.decode().lower()
res_from = re_from.match(query_lower)
if res_from:
self.sql_from_log.setdefault(res_from.group(1), [0, 0])
self.sql_from_log[res_from.group(1)][0] += 1
self.sql_from_log[res_from.group(1)][1] += delay
res_into = re_into.match(query_lower)
if res_into:
self.sql_into_log.setdefault(res_into.group(1), [0, 0])
self.sql_into_log[res_into.group(1)][0] += 1
self.sql_into_log[res_into.group(1)][1] += delay
return res
def split_for_in_conditions(self, ids, size=None):
"""Split a list of identifiers into one or more smaller tuples
safe for IN conditions, after uniquifying them."""
return tools.misc.split_every(size or self.IN_MAX, ids)
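    # Usage sketch for split_for_in_conditions() (table name is hypothetical):
    # large id collections are chunked so each query stays under IN_MAX items.
    #
    #   for chunk in cr.split_for_in_conditions(ids):
    #       cr.execute("SELECT id FROM my_table WHERE id IN %s", [tuple(chunk)])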
def print_log(self):
global sql_counter
if not self.sql_log:
return
def process(type):
sqllogs = {'from': self.sql_from_log, 'into': self.sql_into_log}
sum = 0
if sqllogs[type]:
sqllogitems = sqllogs[type].items()
_logger.debug("SQL LOG %s:", type)
for r in sorted(sqllogitems, key=lambda k: k[1]):
delay = timedelta(microseconds=r[1][1])
_logger.debug("table: %s: %s/%s", r[0], delay, r[1][0])
sum += r[1][1]
sqllogs[type].clear()
sum = timedelta(microseconds=sum)
_logger.debug("SUM %s:%s/%d [%d]", type, sum, self.sql_log_count, sql_counter)
sqllogs[type].clear()
process('from')
process('into')
self.sql_log_count = 0
self.sql_log = False
@check
def close(self):
return self._close(False)
def _close(self, leak=False):
global sql_counter
if not self._obj:
return
del self.cache
# simple query count is always computed
sql_counter += self.sql_log_count
# advanced stats only if sql_log is enabled
self.print_log()
self._obj.close()
        # This forces the cursor to be freed, and thus, available again. It is
        # important because otherwise we can overload the server very easily,
        # because of a cursor shortage (cursors are not garbage
        # collected as fast as they should be). The problem is probably due in
        # part to browse records keeping a reference to the cursor.
del self._obj
# Clean the underlying connection, and run rollback hooks.
self.rollback()
self._closed = True
if leak:
self._cnx.leaked = True
else:
chosen_template = tools.config['db_template']
templates_list = tuple(set(['template0', 'template1', 'postgres', chosen_template]))
keep_in_pool = self.dbname not in templates_list
self.__pool.give_back(self._cnx, keep_in_pool=keep_in_pool)
@check
def autocommit(self, on):
if on:
warnings.warn(
"Since Odoo 13.0, the ORM delays UPDATE queries for "
"performance reasons. Since then, using the ORM with "
" autocommit(True) is unsafe, as computed fields may not be "
"fully computed at commit.", DeprecationWarning, stacklevel=2)
isolation_level = ISOLATION_LEVEL_AUTOCOMMIT
else:
# If a serializable cursor was requested, we
            # use the appropriate PostgreSQL isolation level
# that maps to snapshot isolation.
# For all supported PostgreSQL versions (8.3-9.x),
# this is currently the ISOLATION_REPEATABLE_READ.
# See also the docstring of this class.
# NOTE: up to psycopg 2.4.2, repeatable read
# is remapped to serializable before being
# sent to the database, so it is in fact
# unavailable for use with pg 9.1.
isolation_level = \
ISOLATION_LEVEL_REPEATABLE_READ \
if self._serialized \
else ISOLATION_LEVEL_READ_COMMITTED
self._cnx.set_isolation_level(isolation_level)
@check
def after(self, event, func):
""" Register an event handler.
:param event: the event, either `'commit'` or `'rollback'`
:param func: a callable object, called with no argument after the
event occurs
Be careful when coding an event handler, since any operation on the
cursor that was just committed/rolled back will take place in the
next transaction that has already begun, and may still be rolled
back or committed independently. You may consider the use of a
dedicated temporary cursor to do some database operation.
"""
warnings.warn(
"Cursor.after() is deprecated, use Cursor.postcommit.add() instead.",
DeprecationWarning,
)
if event == 'commit':
self.postcommit.add(func)
elif event == 'rollback':
self.postrollback.add(func)
@check
def commit(self):
""" Perform an SQL `COMMIT` """
self.flush()
result = self._cnx.commit()
self.clear()
self._now = None
self.prerollback.clear()
self.postrollback.clear()
self.postcommit.run()
return result
@check
def rollback(self):
""" Perform an SQL `ROLLBACK` """
self.clear()
self.postcommit.clear()
self.prerollback.run()
result = self._cnx.rollback()
self._now = None
self.postrollback.run()
return result
@check
def __getattr__(self, name):
return getattr(self._obj, name)
@property
def closed(self):
return self._closed
def now(self):
""" Return the transaction's timestamp ``NOW() AT TIME ZONE 'UTC'``. """
if self._now is None:
self.execute("SELECT (now() AT TIME ZONE 'UTC')")
self._now = self.fetchone()[0]
return self._now
class TestCursor(BaseCursor):
""" A pseudo-cursor to be used for tests, on top of a real cursor. It keeps
the transaction open across requests, and simulates committing, rolling
back, and closing:
test cursor | queries on actual cursor
------------------------+---------------------------------------
cr = TestCursor(...) | SAVEPOINT test_cursor_N
|
cr.execute(query) | query
|
cr.commit() | SAVEPOINT test_cursor_N
|
cr.rollback() | ROLLBACK TO SAVEPOINT test_cursor_N
|
cr.close() | ROLLBACK TO SAVEPOINT test_cursor_N
|
"""
_savepoint_seq = itertools.count()
def __init__(self, cursor, lock):
super().__init__()
self._closed = False
self._cursor = cursor
# we use a lock to serialize concurrent requests
self._lock = lock
self._lock.acquire()
# in order to simulate commit and rollback, the cursor maintains a
# savepoint at its last commit
self._savepoint = "test_cursor_%s" % next(self._savepoint_seq)
self._cursor.execute('SAVEPOINT "%s"' % self._savepoint)
def close(self):
if not self._closed:
self.rollback()
self._closed = True
self._lock.release()
def autocommit(self, on):
_logger.debug("TestCursor.autocommit(%r) does nothing", on)
@check
def commit(self):
""" Perform an SQL `COMMIT` """
self.flush()
self._cursor.execute('SAVEPOINT "%s"' % self._savepoint)
self.clear()
self.prerollback.clear()
self.postrollback.clear()
self.postcommit.clear() # TestCursor ignores post-commit hooks
@check
def rollback(self):
""" Perform an SQL `ROLLBACK` """
self.clear()
self.postcommit.clear()
self.prerollback.run()
self._cursor.execute('ROLLBACK TO SAVEPOINT "%s"' % self._savepoint)
self.postrollback.run()
def __getattr__(self, name):
value = getattr(self._cursor, name)
if callable(value) and self._closed:
raise psycopg2.OperationalError('Unable to use a closed cursor.')
return value
class PsycoConnection(psycopg2.extensions.connection):
def lobject(*args, **kwargs):
pass
if hasattr(psycopg2.extensions, 'ConnectionInfo'):
@property
def info(self):
class PsycoConnectionInfo(psycopg2.extensions.ConnectionInfo):
@property
def password(self):
pass
return PsycoConnectionInfo(self)
class ConnectionPool(object):
""" The pool of connections to database(s)
Keep a set of connections to pg databases open, and reuse them
to open cursors for all transactions.
The connections are *not* automatically closed. Only a close_db()
can trigger that.
"""
def locked(fun):
@wraps(fun)
def _locked(self, *args, **kwargs):
self._lock.acquire()
try:
return fun(self, *args, **kwargs)
finally:
self._lock.release()
return _locked
def __init__(self, maxconn=64):
self._connections = []
self._maxconn = max(maxconn, 1)
self._lock = threading.Lock()
def __repr__(self):
used = len([1 for c, u in self._connections[:] if u])
count = len(self._connections)
return "ConnectionPool(used=%d/count=%d/max=%d)" % (used, count, self._maxconn)
def _debug(self, msg, *args):
_logger.debug(('%r ' + msg), self, *args)
@locked
def borrow(self, connection_info):
"""
:param dict connection_info: dict of psql connection keywords
:rtype: PsycoConnection
"""
# free dead and leaked connections
for i, (cnx, _) in tools.reverse_enumerate(self._connections):
if cnx.closed:
self._connections.pop(i)
self._debug('Removing closed connection at index %d: %r', i, cnx.dsn)
continue
if getattr(cnx, 'leaked', False):
delattr(cnx, 'leaked')
self._connections.pop(i)
self._connections.append((cnx, False))
_logger.info('%r: Free leaked connection to %r', self, cnx.dsn)
for i, (cnx, used) in enumerate(self._connections):
if not used and self._dsn_equals(cnx.dsn, connection_info):
try:
cnx.reset()
except psycopg2.OperationalError:
self._debug('Cannot reset connection at index %d: %r', i, cnx.dsn)
# psycopg2 2.4.4 and earlier do not allow closing a closed connection
if not cnx.closed:
cnx.close()
continue
self._connections.pop(i)
self._connections.append((cnx, True))
self._debug('Borrow existing connection to %r at index %d', cnx.dsn, i)
return cnx
if len(self._connections) >= self._maxconn:
# try to remove the oldest connection not used
for i, (cnx, used) in enumerate(self._connections):
if not used:
self._connections.pop(i)
if not cnx.closed:
cnx.close()
self._debug('Removing old connection at index %d: %r', i, cnx.dsn)
break
else:
# note: this code is called only if the for loop has completed (no break)
raise PoolError('The Connection Pool Is Full')
try:
result = psycopg2.connect(
connection_factory=PsycoConnection,
**connection_info)
except psycopg2.Error:
_logger.info('Connection to the database failed')
raise
self._connections.append((result, True))
self._debug('Create new connection')
return result
@locked
def give_back(self, connection, keep_in_pool=True):
self._debug('Give back connection to %r', connection.dsn)
for i, (cnx, used) in enumerate(self._connections):
if cnx is connection:
self._connections.pop(i)
if keep_in_pool:
self._connections.append((cnx, False))
self._debug('Put connection to %r in pool', cnx.dsn)
else:
self._debug('Forgot connection to %r', cnx.dsn)
cnx.close()
break
else:
raise PoolError('This connection does not belong to the pool')
@locked
def close_all(self, dsn=None):
count = 0
last = None
for i, (cnx, used) in tools.reverse_enumerate(self._connections):
if dsn is None or self._dsn_equals(cnx.dsn, dsn):
cnx.close()
last = self._connections.pop(i)[0]
count += 1
_logger.info('%r: Closed %d connections %s', self, count,
(dsn and last and 'to %r' % last.dsn) or '')
def _dsn_equals(self, dsn1, dsn2):
alias_keys = {'dbname': 'database'}
ignore_keys = ['password']
dsn1, dsn2 = ({
alias_keys.get(key, key): str(value)
for key, value in (isinstance(dsn, str) and self._dsn_to_dict(dsn) or dsn).items()
if key not in ignore_keys
} for dsn in (dsn1, dsn2))
return dsn1 == dsn2
def _dsn_to_dict(self, dsn):
return dict(value.split('=', 1) for value in dsn.strip().split())
class Connection(object):
""" A lightweight instance of a connection to postgres
"""
def __init__(self, pool, dbname, dsn):
self.__dbname = dbname
self.__dsn = dsn
self.__pool = pool
@property
def dsn(self):
dsn = dict(self.__dsn)
dsn.pop('password', None)
return dsn
@property
def dbname(self):
return self.__dbname
def cursor(self, serialized=True):
cursor_type = serialized and 'serialized ' or ''
_logger.debug('create %scursor to %r', cursor_type, self.dsn)
return Cursor(self.__pool, self.__dbname, self.__dsn, serialized=serialized)
# serialized_cursor is deprecated - cursors are serialized by default
serialized_cursor = cursor
def __bool__(self):
raise NotImplementedError()
__nonzero__ = __bool__
def connection_info_for(db_or_uri):
""" parse the given `db_or_uri` and return a 2-tuple (dbname, connection_params)
    Connection params are either a dictionary with a single key ``dsn``
    containing a connection URI, or a dictionary containing connection
    parameter keywords from which psycopg2 can build a key/value connection
    string (dsn).
:param str db_or_uri: database name or postgres dsn
:rtype: (str, dict)
"""
if db_or_uri.startswith(('postgresql://', 'postgres://')):
# extract db from uri
us = urls.url_parse(db_or_uri)
if len(us.path) > 1:
db_name = us.path[1:]
elif us.username:
db_name = us.username
else:
db_name = us.hostname
return db_name, {'dsn': db_or_uri}
connection_info = {'database': db_or_uri}
for p in ('host', 'port', 'user', 'password', 'sslmode'):
cfg = tools.config['db_' + p]
if cfg:
connection_info[p] = cfg
return db_or_uri, connection_info
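# Illustrative behaviour (hypothetical values):
#   connection_info_for('mydb')
#       -> ('mydb', {'database': 'mydb', ...})   # plus db_* keys set in the config
#   connection_info_for('postgresql://user:pw@host:5432/mydb')
#       -> ('mydb', {'dsn': 'postgresql://user:pw@host:5432/mydb'})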
_Pool = None
def db_connect(to, allow_uri=False):
global _Pool
if _Pool is None:
_Pool = ConnectionPool(int(tools.config['db_maxconn']))
db, info = connection_info_for(to)
if not allow_uri and db != to:
raise ValueError('URI connections not allowed')
return Connection(_Pool, db, info)
def close_db(db_name):
""" You might want to call odoo.modules.registry.Registry.delete(db_name) along this function."""
global _Pool
if _Pool:
_Pool.close_all(connection_info_for(db_name)[1])
def close_all():
global _Pool
if _Pool:
_Pool.close_all()
| 36.472569 | 29,251 |
3,984 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""The Odoo Exceptions module defines a few core exception types.
Those types are understood by the RPC layer.
Any other exception type bubbling until the RPC layer will be
treated as a 'Server error'.
.. note::
If you consider introducing new exceptions,
check out the :mod:`odoo.addons.test_exceptions` module.
"""
import logging
import warnings
_logger = logging.getLogger(__name__)
class UserError(Exception):
"""Generic error managed by the client.
    Typically raised when the user tries to do something that makes no sense given the
    current state of a record. Semantically comparable to the generic 400 HTTP status codes.
"""
def __init__(self, message):
"""
:param message: exception message and frontend modal content
"""
super().__init__(message)
@property
def name(self):
warnings.warn(
"UserError attribute 'name' is a deprecated alias to args[0]",
DeprecationWarning)
return self.args[0]
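# Illustrative usage in business code (hypothetical message):
#   raise UserError("You cannot delete a posted journal entry.")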
class RedirectWarning(Exception):
""" Warning with a possibility to redirect the user instead of simply
displaying the warning message.
:param str message: exception message and frontend modal content
:param int action_id: id of the action where to perform the redirection
:param str button_text: text to put on the button that will trigger
the redirection.
:param dict additional_context: parameter passed to action_id.
Can be used to limit a view to active_ids for example.
"""
def __init__(self, message, action, button_text, additional_context=None):
super().__init__(message, action, button_text, additional_context)
# using this RedirectWarning won't crash if used as an UserError
@property
def name(self):
warnings.warn(
"RedirectWarning attribute 'name' is a deprecated alias to args[0]",
DeprecationWarning)
return self.args[0]
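# Illustrative usage (hypothetical action id and texts):
#   raise RedirectWarning(
#       "Please configure your company data first.",
#       action.id,
#       "Go to the configuration panel")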
class AccessDenied(UserError):
"""Login/password error.
.. note::
No traceback.
.. admonition:: Example
When you try to log with a wrong password.
"""
def __init__(self, message="Access Denied"):
super().__init__(message)
self.with_traceback(None)
self.__cause__ = None
self.traceback = ('', '', '')
class AccessError(UserError):
"""Access rights error.
.. admonition:: Example
When you try to read a record that you are not allowed to.
"""
class CacheMiss(KeyError):
"""Missing value(s) in cache.
.. admonition:: Example
When you try to read a value in a flushed cache.
"""
def __init__(self, record, field):
super().__init__("%r.%s" % (record, field.name))
class MissingError(UserError):
"""Missing record(s).
.. admonition:: Example
When you try to write on a deleted record.
"""
class ValidationError(UserError):
"""Violation of python constraints.
.. admonition:: Example
        When you try to create a new user with a login which already exists in the db.
"""
# Deprecated exceptions, only kept for backward compatibility, may be
# removed in the future *without* any further notice than the Deprecation
# Warning.
class except_orm(UserError):
def __init__(self, name, value=None):
warnings.warn("except_orm is a deprecated alias to UserError.", DeprecationWarning)
super().__init__(f"{name}: {value}")
class Warning(UserError):
def __init__(self, *args, **kwargs):
warnings.warn("Warning is a deprecated alias to UserError.", DeprecationWarning)
super().__init__(*args, **kwargs)
class QWebException(Exception):
def __init__(self, *args, **kwargs):
warnings.warn("qweb.QWebException is the exception you are looking for.", DeprecationWarning)
super().__init__(*args, **kwargs)
| 28.457143 | 3,984 |
24,786 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import collections
import logging
import random
import re
import socket
import threading
import time
from email.utils import getaddresses
from urllib.parse import urlparse
import idna
import markupsafe
from lxml import etree
from lxml.html import clean
from werkzeug import urls
import odoo
from odoo.loglevels import ustr
from odoo.tools import misc
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# HTML Sanitizer
#----------------------------------------------------------
tags_to_kill = ['base', 'embed', 'frame', 'head', 'iframe', 'link', 'meta',
'noscript', 'object', 'script', 'style', 'title']
tags_to_remove = ['html', 'body']
# allow new semantic HTML5 tags
allowed_tags = clean.defs.tags | frozenset('article bdi section header footer hgroup nav aside figure main'.split() + [etree.Comment])
safe_attrs = clean.defs.safe_attrs | frozenset(
['style',
'data-o-mail-quote', # quote detection
'data-oe-model', 'data-oe-id', 'data-oe-field', 'data-oe-type', 'data-oe-expression', 'data-oe-translation-id', 'data-oe-nodeid',
'data-last-history-steps',
'data-publish', 'data-id', 'data-res_id', 'data-interval', 'data-member_id', 'data-scroll-background-ratio', 'data-view-id',
'data-class', 'data-mimetype', 'data-original-src', 'data-original-id', 'data-gl-filter', 'data-quality', 'data-resize-width',
'data-shape', 'data-shape-colors', 'data-file-name', 'data-original-mimetype',
])
class _Cleaner(clean.Cleaner):
_style_re = re.compile(r'''([\w-]+)\s*:\s*((?:[^;"']|"[^";]*"|'[^';]*')+)''')
_style_whitelist = [
'font-size', 'font-family', 'font-weight', 'font-style', 'background-color', 'color', 'text-align',
'line-height', 'letter-spacing', 'text-transform', 'text-decoration', 'text-decoration', 'opacity',
'float', 'vertical-align', 'display',
'padding', 'padding-top', 'padding-left', 'padding-bottom', 'padding-right',
'margin', 'margin-top', 'margin-left', 'margin-bottom', 'margin-right',
'white-space',
# box model
'border', 'border-color', 'border-radius', 'border-style', 'border-width', 'border-top', 'border-bottom',
'height', 'width', 'max-width', 'min-width', 'min-height',
# tables
'border-collapse', 'border-spacing', 'caption-side', 'empty-cells', 'table-layout']
_style_whitelist.extend(
['border-%s-%s' % (position, attribute)
for position in ['top', 'bottom', 'left', 'right']
for attribute in ('style', 'color', 'width', 'left-radius', 'right-radius')]
)
strip_classes = False
sanitize_style = False
def __call__(self, doc):
# perform quote detection before cleaning and class removal
for el in doc.iter(tag=etree.Element):
self.tag_quote(el)
super(_Cleaner, self).__call__(doc)
# if we keep attributes but still remove classes
if not getattr(self, 'safe_attrs_only', False) and self.strip_classes:
for el in doc.iter(tag=etree.Element):
self.strip_class(el)
# if we keep style attribute, sanitize them
if not self.style and self.sanitize_style:
for el in doc.iter(tag=etree.Element):
self.parse_style(el)
def tag_quote(self, el):
def _create_new_node(tag, text, tail=None, attrs=None):
new_node = etree.Element(tag)
new_node.text = text
new_node.tail = tail
if attrs:
for key, val in attrs.items():
new_node.set(key, val)
return new_node
def _tag_matching_regex_in_text(regex, node, tag='span', attrs=None):
text = node.text or ''
if not re.search(regex, text):
return
child_node = None
idx, node_idx = 0, 0
for item in re.finditer(regex, text):
new_node = _create_new_node(tag, text[item.start():item.end()], None, attrs)
if child_node is None:
node.text = text[idx:item.start()]
new_node.tail = text[item.end():]
node.insert(node_idx, new_node)
else:
child_node.tail = text[idx:item.start()]
new_node.tail = text[item.end():]
node.insert(node_idx, new_node)
child_node = new_node
idx = item.end()
node_idx = node_idx + 1
el_class = el.get('class', '') or ''
el_id = el.get('id', '') or ''
# gmail or yahoo // # outlook, html // # msoffice
if 'gmail_extra' in el_class or \
'divRplyFwdMsg' in el_id or \
           ('SkyDrivePlaceholder' in el_class or 'SkyDrivePlaceholder' in el_id):
el.set('data-o-mail-quote', '1')
if el.getparent() is not None:
el.getparent().set('data-o-mail-quote-container', '1')
if (el.tag == 'hr' and ('stopSpelling' in el_class or 'stopSpelling' in el_id)) or \
'yahoo_quoted' in el_class:
# Quote all elements after this one
el.set('data-o-mail-quote', '1')
for sibling in el.itersiblings(preceding=False):
sibling.set('data-o-mail-quote', '1')
# html signature (-- <br />blah)
signature_begin = re.compile(r"((?:(?:^|\n)[-]{2}[\s]?$))")
if el.text and el.find('br') is not None and re.search(signature_begin, el.text):
el.set('data-o-mail-quote', '1')
if el.getparent() is not None:
el.getparent().set('data-o-mail-quote-container', '1')
# text-based quotes (>, >>) and signatures (-- Signature)
text_complete_regex = re.compile(r"((?:\n[>]+[^\n\r]*)+|(?:(?:^|\n)[-]{2}[\s]?[\r\n]{1,2}[\s\S]+))")
if not el.get('data-o-mail-quote'):
_tag_matching_regex_in_text(text_complete_regex, el, 'span', {'data-o-mail-quote': '1'})
if el.tag == 'blockquote':
# remove single node
el.set('data-o-mail-quote-node', '1')
el.set('data-o-mail-quote', '1')
if el.getparent() is not None and (el.getparent().get('data-o-mail-quote') or el.getparent().get('data-o-mail-quote-container')) and not el.getparent().get('data-o-mail-quote-node'):
el.set('data-o-mail-quote', '1')
def strip_class(self, el):
if el.attrib.get('class'):
del el.attrib['class']
def parse_style(self, el):
attributes = el.attrib
styling = attributes.get('style')
if styling:
valid_styles = collections.OrderedDict()
styles = self._style_re.findall(styling)
for style in styles:
if style[0].lower() in self._style_whitelist:
valid_styles[style[0].lower()] = style[1]
if valid_styles:
el.attrib['style'] = '; '.join('%s:%s' % (key, val) for (key, val) in valid_styles.items())
else:
del el.attrib['style']
def html_sanitize(src, silent=True, sanitize_tags=True, sanitize_attributes=False, sanitize_style=False, sanitize_form=True, strip_style=False, strip_classes=False):
if not src:
return src
src = ustr(src, errors='replace')
# html: remove encoding attribute inside tags
doctype = re.compile(r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)', re.IGNORECASE | re.DOTALL)
src = doctype.sub(u"", src)
logger = logging.getLogger(__name__ + '.html_sanitize')
# html encode mako tags <% ... %> to decode them later and keep them alive, otherwise they are stripped by the cleaner
src = src.replace(u'<%', misc.html_escape(u'<%'))
src = src.replace(u'%>', misc.html_escape(u'%>'))
kwargs = {
'page_structure': True,
'style': strip_style, # True = remove style tags/attrs
'sanitize_style': sanitize_style, # True = sanitize styling
'forms': sanitize_form, # True = remove form tags
'remove_unknown_tags': False,
'comments': False,
'processing_instructions': False
}
if sanitize_tags:
kwargs['allow_tags'] = allowed_tags
if etree.LXML_VERSION >= (2, 3, 1):
# kill_tags attribute has been added in version 2.3.1
kwargs.update({
'kill_tags': tags_to_kill,
'remove_tags': tags_to_remove,
})
else:
kwargs['remove_tags'] = tags_to_kill + tags_to_remove
if sanitize_attributes and etree.LXML_VERSION >= (3, 1, 0): # lxml < 3.1.0 does not allow to specify safe_attrs. We keep all attributes in order to keep "style"
if strip_classes:
current_safe_attrs = safe_attrs - frozenset(['class'])
else:
current_safe_attrs = safe_attrs
kwargs.update({
'safe_attrs_only': True,
'safe_attrs': current_safe_attrs,
})
else:
kwargs.update({
'safe_attrs_only': False, # keep oe-data attributes + style
'strip_classes': strip_classes, # remove classes, even when keeping other attributes
})
try:
src = src.replace('--!>', '-->')
src = re.sub(r'(<!-->|<!--->)', '<!-- -->', src)
# some corner cases make the parser crash (such as <SCRIPT/XSS SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT> in test_mail)
cleaner = _Cleaner(**kwargs)
cleaned = cleaner.clean_html(src)
assert isinstance(cleaned, str)
# MAKO compatibility: $, { and } inside quotes are escaped, preventing correct mako execution
cleaned = cleaned.replace(u'%24', u'$')
cleaned = cleaned.replace(u'%7B', u'{')
cleaned = cleaned.replace(u'%7D', u'}')
cleaned = cleaned.replace(u'%20', u' ')
cleaned = cleaned.replace(u'%5B', u'[')
cleaned = cleaned.replace(u'%5D', u']')
cleaned = cleaned.replace(u'%7C', u'|')
cleaned = cleaned.replace(u'<%', u'<%')
cleaned = cleaned.replace(u'%>', u'%>')
# html considerations so real html content match database value
        cleaned = cleaned.replace(u'\xa0', u' ')
except etree.ParserError as e:
if 'empty' in str(e):
return u""
if not silent:
raise
logger.warning(u'ParserError obtained when sanitizing %r', src, exc_info=True)
cleaned = u'<p>ParserError when sanitizing</p>'
except Exception:
if not silent:
raise
logger.warning(u'unknown error obtained when sanitizing %r', src, exc_info=True)
cleaned = u'<p>Unknown error when sanitizing</p>'
# this is ugly, but lxml/etree tostring want to put everything in a 'div' that breaks the editor -> remove that
if cleaned.startswith(u'<div>') and cleaned.endswith(u'</div>'):
cleaned = cleaned[5:-6]
return markupsafe.Markup(cleaned)
# ----------------------------------------------------------
# HTML/Text management
# ----------------------------------------------------------
URL_REGEX = r'(\bhref=[\'"](?!mailto:|tel:|sms:)([^\'"]+)[\'"])'
TEXT_URL_REGEX = r'https?://[\w@:%.+&~#=/-]+(?:\?\S+)?'
# retrieve inner content of the link
HTML_TAG_URL_REGEX = URL_REGEX + r'([^<>]*>([^<>]+)<\/)?'
def validate_url(url):
if urls.url_parse(url).scheme not in ('http', 'https', 'ftp', 'ftps'):
return 'http://' + url
return url
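# Illustrative examples (assumed values):
#   validate_url('www.example.com')     -> 'http://www.example.com'
#   validate_url('https://example.com') -> 'https://example.com'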
def is_html_empty(html_content):
"""Check if a html content is empty. If there are only formatting tags with style
attributes or a void content return True. Famous use case if a
'<p style="..."><br></p>' added by some web editor.
:param str html_content: html content, coming from example from an HTML field
:returns: bool, True if no content found or if containing only void formatting tags
"""
if not html_content:
return True
tag_re = re.compile(r'\<\s*\/?(?:p|div|span|br|b|i|font)(?:(?=\s+\w*)[^/>]*|\s*)/?\s*\>')
return not bool(re.sub(tag_re, '', html_content).strip())
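# Illustrative examples (assumed values):
#   is_html_empty('<p style="margin:0"><br></p>')  -> True
#   is_html_empty('<p>Hello</p>')                  -> False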
def html_keep_url(text):
""" Transform the url into clickable link with <a/> tag """
idx = 0
final = ''
link_tags = re.compile(r"""(?<!["'])((ftp|http|https):\/\/(\w+:{0,1}\w*@)?([^\s<"']+)(:[0-9]+)?(\/|\/([^\s<"']))?)(?![^\s<"']*["']|[^\s<"']*</a>)""")
for item in re.finditer(link_tags, text):
final += text[idx:item.start()]
final += '<a href="%s" target="_blank" rel="noreferrer noopener">%s</a>' % (item.group(0), item.group(0))
idx = item.end()
final += text[idx:]
return final
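# Illustrative example (assumed value):
#   html_keep_url('see https://www.odoo.com for details')
#   -> 'see <a href="https://www.odoo.com" target="_blank" rel="noreferrer noopener">https://www.odoo.com</a> for details'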
def html2plaintext(html, body_id=None, encoding='utf-8'):
""" From an HTML text, convert the HTML to plain text.
If @param body_id is provided then this is the tag where the
body (not necessarily <body>) starts.
"""
## (c) Fry-IT, www.fry-it.com, 2007
## <[email protected]>
## download here: http://www.peterbe.com/plog/html2plaintext
html = ustr(html)
if not html.strip():
return ''
tree = etree.fromstring(html, parser=etree.HTMLParser())
if body_id is not None:
source = tree.xpath('//*[@id=%s]' % (body_id,))
else:
source = tree.xpath('//body')
if len(source):
tree = source[0]
url_index = []
i = 0
for link in tree.findall('.//a'):
url = link.get('href')
if url:
i += 1
link.tag = 'span'
link.text = '%s [%s]' % (link.text, i)
url_index.append(url)
html = ustr(etree.tostring(tree, encoding=encoding))
    # \r char is converted into &#13;, must remove it
html = html.replace(' ', '')
html = html.replace('<strong>', '*').replace('</strong>', '*')
html = html.replace('<b>', '*').replace('</b>', '*')
html = html.replace('<h3>', '*').replace('</h3>', '*')
html = html.replace('<h2>', '**').replace('</h2>', '**')
html = html.replace('<h1>', '**').replace('</h1>', '**')
html = html.replace('<em>', '/').replace('</em>', '/')
html = html.replace('<tr>', '\n')
html = html.replace('</p>', '\n')
    html = re.sub(r'<br\s*/?>', '\n', html)
html = re.sub('<.*?>', ' ', html)
html = html.replace(' ' * 2, ' ')
html = html.replace('>', '>')
html = html.replace('<', '<')
html = html.replace('&', '&')
# strip all lines
html = '\n'.join([x.strip() for x in html.splitlines()])
html = html.replace('\n' * 2, '\n')
for i, url in enumerate(url_index):
if i == 0:
html += '\n\n'
html += ustr('[%s] %s\n') % (i + 1, url)
return html.strip()
def plaintext2html(text, container_tag=False):
""" Convert plaintext into html. Content of the text is escaped to manage
html entities, using misc.html_escape().
- all \n,\r are replaced by <br />
- enclose content into <p>
- convert url into clickable link
- 2 or more consecutive <br /> are considered as paragraph breaks
:param string container_tag: container of the html; by default the
content is embedded into a <div>
"""
text = misc.html_escape(ustr(text))
# 1. replace \n and \r
text = re.sub(r'(\r\n|\r|\n)', '<br/>', text)
# 2. clickable links
text = html_keep_url(text)
# 3-4: form paragraphs
idx = 0
final = '<p>'
br_tags = re.compile(r'(([<]\s*[bB][rR]\s*/?[>]\s*){2,})')
for item in re.finditer(br_tags, text):
final += text[idx:item.start()] + '</p><p>'
idx = item.end()
final += text[idx:] + '</p>'
# 5. container
if container_tag: # FIXME: validate that container_tag is just a simple tag?
final = '<%s>%s</%s>' % (container_tag, final, container_tag)
return markupsafe.Markup(final)
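# Illustrative example (assumed value):
#   plaintext2html('Hello\nWorld')  -> Markup('<p>Hello<br/>World</p>')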
def append_content_to_html(html, content, plaintext=True, preserve=False, container_tag=False):
""" Append extra content at the end of an HTML snippet, trying
to locate the end of the HTML document (</body>, </html>, or
EOF), and converting the provided content in html unless ``plaintext``
is False.
Content conversion can be done in two ways:
- wrapping it into a pre (preserve=True)
- use plaintext2html (preserve=False, using container_tag to wrap the
whole content)
A side-effect of this method is to coerce all HTML tags to
lowercase in ``html``, and strip enclosing <html> or <body> tags in
content if ``plaintext`` is False.
:param str html: html tagsoup (doesn't have to be XHTML)
:param str content: extra content to append
:param bool plaintext: whether content is plaintext and should
be wrapped in a <pre/> tag.
:param bool preserve: if content is plaintext, wrap it into a <pre>
instead of converting it into html
"""
html = ustr(html)
if plaintext and preserve:
content = u'\n<pre>%s</pre>\n' % misc.html_escape(ustr(content))
elif plaintext:
content = '\n%s\n' % plaintext2html(content, container_tag)
else:
content = re.sub(r'(?i)(</?(?:html|body|head|!\s*DOCTYPE)[^>]*>)', '', content)
content = u'\n%s\n' % ustr(content)
# Force all tags to lowercase
html = re.sub(r'(</?)(\w+)([ >])',
lambda m: '%s%s%s' % (m.group(1), m.group(2).lower(), m.group(3)), html)
insert_location = html.find('</body>')
if insert_location == -1:
insert_location = html.find('</html>')
if insert_location == -1:
return markupsafe.Markup('%s%s' % (html, content))
return markupsafe.Markup('%s%s%s' % (html[:insert_location], content, html[insert_location:]))
def prepend_html_content(html_body, html_content):
"""Prepend some HTML content at the beginning of an other HTML content."""
html_content = type(html_content)(re.sub(r'(?i)(</?(?:html|body|head|!\s*DOCTYPE)[^>]*>)', '', html_content))
html_content = html_content.strip()
body_match = re.search(r'<body[^>]*>', html_body) or re.search(r'<html[^>]*>', html_body)
insert_index = body_match.end() if body_match else 0
return html_body[:insert_index] + html_content + html_body[insert_index:]
#----------------------------------------------------------
# Emails
#----------------------------------------------------------
# matches any email in a body of text
email_re = re.compile(r"""([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,63})""", re.VERBOSE)
# matches a string containing only one email
single_email_re = re.compile(r"""^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,63}$""", re.VERBOSE)
mail_header_msgid_re = re.compile('<[^<>]+>')
email_addr_escapes_re = re.compile(r'[\\"]')
def generate_tracking_message_id(res_id):
"""Returns a string that can be used in the Message-ID RFC822 header field
Used to track the replies related to a given object thanks to the "In-Reply-To"
or "References" fields that Mail User Agents will set.
"""
try:
rnd = random.SystemRandom().random()
except NotImplementedError:
rnd = random.random()
rndstr = ("%.15f" % rnd)[2:]
return "<%s.%.15f-openerp-%s@%s>" % (rndstr, time.time(), res_id, socket.gethostname())
def email_split_tuples(text):
""" Return a list of (name, email) address tuples found in ``text`` . Note
that text should be an email header or a stringified email list as it may
give broader results than expected on actual text. """
if not text:
return []
return [(addr[0], addr[1]) for addr in getaddresses([text])
# getaddresses() returns '' when email parsing fails, and
# sometimes returns emails without at least '@'. The '@'
# is strictly required in RFC2822's `addr-spec`.
if addr[1]
if '@' in addr[1]]
def email_split(text):
""" Return a list of the email addresses found in ``text`` """
if not text:
return []
return [email for (name, email) in email_split_tuples(text)]
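# Illustrative examples (assumed values):
#   email_split_tuples('"Jane" <[email protected]>, [email protected]')
#       -> [('Jane', '[email protected]'), ('', '[email protected]')]
#   email_split('"Jane" <[email protected]>, [email protected]')
#       -> ['[email protected]', '[email protected]']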
def email_split_and_format(text):
""" Return a list of email addresses found in ``text``, formatted using
formataddr. """
if not text:
return []
return [formataddr((name, email)) for (name, email) in email_split_tuples(text)]
def email_normalize(text):
""" Sanitize and standardize email address entries.
A normalized email is considered as :
- having a left part + @ + a right part (the domain can be without '.something')
- being lower case
- having no name before the address. Typically, having no 'Name <>'
Ex:
- Possible Input Email : 'Name <[email protected]>'
- Normalized Output Email : '[email protected]'
"""
emails = email_split(text)
if not emails or len(emails) != 1:
return False
return emails[0].lower()
def email_domain_extract(email):
""" Extract the company domain to be used by IAP services notably. Domain
is extracted from email information e.g:
- [email protected] -> proximus.be
"""
normalized_email = email_normalize(email)
if normalized_email:
return normalized_email.split('@')[1]
return False
def email_domain_normalize(domain):
"""Return the domain normalized or False if the domain is invalid."""
if not domain or '@' in domain:
return False
return domain.lower()
def url_domain_extract(url):
""" Extract the company domain to be used by IAP services notably. Domain
is extracted from an URL e.g:
- www.info.proximus.be -> proximus.be
"""
parser_results = urlparse(url)
company_hostname = parser_results.hostname
if company_hostname and '.' in company_hostname:
return '.'.join(company_hostname.split('.')[-2:]) # remove subdomains
return False
def email_escape_char(email_address):
""" Escape problematic characters in the given email address string"""
return email_address.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_')
# was mail_thread.decode_header()
def decode_message_header(message, header, separator=' '):
return separator.join(h for h in message.get_all(header, []) if h)
def formataddr(pair, charset='utf-8'):
"""Pretty format a 2-tuple of the form (realname, email_address).
If the first element of pair is falsy then only the email address
is returned.
Set the charset to ascii to get a RFC-2822 compliant email. The
realname will be base64 encoded (if necessary) and the domain part
of the email will be punycode encoded (if necessary). The local part
is left unchanged thus require the SMTPUTF8 extension when there are
non-ascii characters.
>>> formataddr(('John Doe', '[email protected]'))
'"John Doe" <[email protected]>'
>>> formataddr(('', '[email protected]'))
'[email protected]'
"""
name, address = pair
local, _, domain = address.rpartition('@')
try:
domain.encode(charset)
except UnicodeEncodeError:
# rfc5890 - Internationalized Domain Names for Applications (IDNA)
domain = idna.encode(domain).decode('ascii')
if name:
try:
name.encode(charset)
except UnicodeEncodeError:
# charset mismatch, encode as utf-8/base64
# rfc2047 - MIME Message Header Extensions for Non-ASCII Text
name = base64.b64encode(name.encode('utf-8')).decode('ascii')
return f"=?utf-8?b?{name}?= <{local}@{domain}>"
else:
# ascii name, escape it if needed
# rfc2822 - Internet Message Format
# #section-3.4 - Address Specification
name = email_addr_escapes_re.sub(r'\\\g<0>', name)
return f'"{name}" <{local}@{domain}>'
return f"{local}@{domain}"
def encapsulate_email(old_email, new_email):
"""Change the FROM of the message and use the old one as name.
e.g.
* Old From: "Admin" <[email protected]>
* New From: [email protected]
* Output: "Admin" <[email protected]>
"""
old_email_split = getaddresses([old_email])
if not old_email_split or not old_email_split[0]:
return old_email
new_email_split = getaddresses([new_email])
if not new_email_split or not new_email_split[0]:
return
old_name, old_email = old_email_split[0]
if old_name:
name_part = old_name
else:
name_part = old_email.split("@")[0]
return formataddr((
name_part,
new_email_split[0][1],
))
| 39.405405 | 24,786 |
56,815 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
Miscellaneous tools used by OpenERP.
"""
import cProfile
import collections
import datetime
import hmac as hmac_lib
import hashlib
import io
import itertools
import logging
import os
import pickle as pickle_
import re
import socket
import subprocess
import sys
import tempfile
import threading
import time
import traceback
import types
import unicodedata
import zipfile
from collections import OrderedDict, defaultdict
from collections.abc import Iterable, Mapping, MutableMapping, MutableSet
from contextlib import contextmanager
from difflib import HtmlDiff
from functools import wraps
from itertools import islice, groupby as itergroupby
from operator import itemgetter
import babel
import babel.dates
import markupsafe
import passlib.utils
import pytz
import werkzeug.utils
from lxml import etree
import odoo
import odoo.addons
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# They were moved to loglevels until we refactor tools.
from odoo.loglevels import get_encodings, ustr, exception_to_unicode # noqa
from . import pycompat
from .cache import *
from .config import config
from .parse_version import parse_version
from .which import which
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase, etree._Entity)
# Configure default global parser
etree.set_default_parser(etree.XMLParser(resolve_entities=False))
NON_BREAKING_SPACE = u'\N{NO-BREAK SPACE}'
#----------------------------------------------------------
# Subprocesses
#----------------------------------------------------------
def find_in_path(name):
path = os.environ.get('PATH', os.defpath).split(os.pathsep)
if config.get('bin_path') and config['bin_path'] != 'None':
path.append(config['bin_path'])
return which(name, path=os.pathsep.join(path))
def _exec_pipe(prog, args, env=None):
cmd = (prog,) + args
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
close_fds = os.name=="posix"
pop = subprocess.Popen(cmd, bufsize=-1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=close_fds, env=env)
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Command `%s` not found.' % name)
return _exec_pipe(prog, args)
#----------------------------------------------------------
# Postgres subprocesses
#----------------------------------------------------------
def find_pg_tool(name):
path = None
if config['pg_path'] and config['pg_path'] != 'None':
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
raise Exception('Command `%s` not found.' % name)
def exec_pg_environ():
"""
Force the database PostgreSQL environment variables to the database
configuration of Odoo.
Note: On systems where pg_restore/pg_dump require an explicit password
(i.e. on Windows where TCP sockets are used), it is necessary to pass the
postgres user password in the PGPASSWORD environment variable or in a
special .pgpass file.
See also http://www.postgresql.org/docs/8.4/static/libpq-envars.html
"""
env = os.environ.copy()
if odoo.tools.config['db_host']:
env['PGHOST'] = odoo.tools.config['db_host']
if odoo.tools.config['db_port']:
env['PGPORT'] = str(odoo.tools.config['db_port'])
if odoo.tools.config['db_user']:
env['PGUSER'] = odoo.tools.config['db_user']
if odoo.tools.config['db_password']:
env['PGPASSWORD'] = odoo.tools.config['db_password']
return env
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
with open(os.devnull) as dn:
args2 = (prog,) + args
rc = subprocess.call(args2, env=env, stdout=dn, stderr=subprocess.STDOUT)
if rc:
raise Exception('Postgres subprocess %s error %s' % (args2, rc))
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
return _exec_pipe(prog, args, env)
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
def file_path(file_path, filter_ext=('',), env=None):
"""Verify that a file exists under a known `addons_path` directory and return its full path.
Examples::
>>> file_path('hr')
>>> file_path('hr/static/description/icon.png')
>>> file_path('hr/static/description/icon.png', filter_ext=('.png', '.jpg'))
:param str file_path: absolute file path, or relative path within any `addons_path` directory
:param list[str] filter_ext: optional list of supported extensions (lowercase, with leading dot)
:param env: optional environment, required for a file path within a temporary directory
created using `file_open_temporary_directory()`
:return: the absolute path to the file
:raise FileNotFoundError: if the file is not found under the known `addons_path` directories
:raise ValueError: if the file doesn't have one of the supported extensions (`filter_ext`)
"""
root_path = os.path.abspath(config['root_path'])
addons_paths = odoo.addons.__path__ + [root_path]
if env and hasattr(env.transaction, '__file_open_tmp_paths'):
addons_paths += env.transaction.__file_open_tmp_paths
is_abs = os.path.isabs(file_path)
normalized_path = os.path.normpath(os.path.normcase(file_path))
if filter_ext and not normalized_path.lower().endswith(filter_ext):
raise ValueError("Unsupported file: " + file_path)
# ignore leading 'addons/' if present, it's the final component of root_path, but
# may sometimes be included in relative paths
if normalized_path.startswith('addons' + os.sep):
normalized_path = normalized_path[7:]
for addons_dir in addons_paths:
# final path sep required to avoid partial match
parent_path = os.path.normpath(os.path.normcase(addons_dir)) + os.sep
fpath = (normalized_path if is_abs else
os.path.normpath(os.path.normcase(os.path.join(parent_path, normalized_path))))
if fpath.startswith(parent_path) and os.path.exists(fpath):
return fpath
raise FileNotFoundError("File not found: " + file_path)
def file_open(name, mode="r", filter_ext=None, env=None):
"""Open a file from within the addons_path directories, as an absolute or relative path.
Examples::
>>> file_open('hr/static/description/icon.png')
>>> file_open('hr/static/description/icon.png', filter_ext=('.png', '.jpg'))
>>> with file_open('/opt/odoo/addons/hr/static/description/icon.png', 'rb') as f:
... contents = f.read()
:param name: absolute or relative path to a file located inside an addon
:param mode: file open mode, as for `open()`
:param list[str] filter_ext: optional list of supported extensions (lowercase, with leading dot)
:param env: optional environment, required to open a file within a temporary directory
created using `file_open_temporary_directory()`
:return: file object, as returned by `open()`
:raise FileNotFoundError: if the file is not found under the known `addons_path` directories
:raise ValueError: if the file doesn't have one of the supported extensions (`filter_ext`)
"""
path = file_path(name, filter_ext=filter_ext, env=env)
if os.path.isfile(path):
if 'b' not in mode:
# Force encoding for text mode, as system locale could affect default encoding,
# even with the latest Python 3 versions.
# Note: This is not covered by a unit test, due to the platform dependency.
# For testing purposes you should be able to force a non-UTF8 encoding with:
# `sudo locale-gen fr_FR; LC_ALL=fr_FR.iso8859-1 python3 ...'
# See also PEP-540, although we can't rely on that at the moment.
return open(path, mode, encoding="utf-8")
return open(path, mode)
raise FileNotFoundError("Not a file: " + name)
@contextmanager
def file_open_temporary_directory(env):
"""Create and return a temporary directory added to the directories `file_open` is allowed to read from.
`file_open` will be allowed to open files within the temporary directory
only for environments of the same transaction than `env`.
Meaning, other transactions/requests from other users or even other databases
won't be allowed to open files from this directory.
Examples::
>>> with odoo.tools.file_open_temporary_directory(self.env) as module_dir:
... with zipfile.ZipFile('foo.zip', 'r') as z:
... z.extract('foo/__manifest__.py', module_dir)
... with odoo.tools.file_open('foo/__manifest__.py', env=self.env) as f:
... manifest = f.read()
:param env: environment for which the temporary directory is created.
:return: the absolute path to the created temporary directory
"""
assert not hasattr(env.transaction, '__file_open_tmp_paths'), 'Reentrancy is not implemented for this method'
with tempfile.TemporaryDirectory() as module_dir:
try:
env.transaction.__file_open_tmp_paths = (module_dir,)
yield module_dir
finally:
del env.transaction.__file_open_tmp_paths
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
"""Flatten a list of elements into a unique list
Author: Christophe Simonis ([email protected])
Examples::
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
r = []
for e in list:
if isinstance(e, (bytes, str)) or not isinstance(e, collections.abc.Iterable):
r.append(e)
else:
r.extend(flatten(e))
return r
def reverse_enumerate(l):
"""Like enumerate but in the other direction
Usage::
>>> a = ['a', 'b', 'c']
>>> it = reverse_enumerate(a)
>>> it.next()
(2, 'c')
>>> it.next()
(1, 'b')
>>> it.next()
(0, 'a')
>>> it.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
return zip(range(len(l)-1, -1, -1), reversed(l))
def partition(pred, elems):
""" Return a pair equivalent to:
``filter(pred, elems), filter(lambda x: not pred(x), elems)` """
yes, nos = [], []
for elem in elems:
(yes if pred(elem) else nos).append(elem)
return yes, nos
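# Illustrative example (assumed values):
#   partition(lambda x: x % 2 == 0, [1, 2, 3, 4, 5])  -> ([2, 4], [1, 3, 5])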
def topological_sort(elems):
""" Return a list of elements sorted so that their dependencies are listed
before them in the result.
:param elems: specifies the elements to sort with their dependencies; it is
a dictionary like `{element: dependencies}` where `dependencies` is a
collection of elements that must appear before `element`. The elements
of `dependencies` are not required to appear in `elems`; they will
simply not appear in the result.
:returns: a list with the keys of `elems` sorted according to their
specification.
"""
# the algorithm is inspired by [Tarjan 1976],
# http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
result = []
visited = set()
def visit(n):
if n not in visited:
visited.add(n)
if n in elems:
# first visit all dependencies of n, then append n to result
for it in elems[n]:
visit(it)
result.append(n)
for el in elems:
visit(el)
return result
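# Illustrative example (assumed values):
#   topological_sort({'b': ['a'], 'a': [], 'c': ['b']})  -> ['a', 'b', 'c']
#   ('a' is listed before 'b' because 'b' depends on it, and so on)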
def merge_sequences(*iterables):
""" Merge several iterables into a list. The result is the union of the
iterables, ordered following the partial order given by the iterables,
with a bias towards the end for the last iterable::
seq = merge_sequences(['A', 'B', 'C'])
assert seq == ['A', 'B', 'C']
seq = merge_sequences(
['A', 'B', 'C'],
['Z'], # 'Z' can be anywhere
['Y', 'C'], # 'Y' must precede 'C';
['A', 'X', 'Y'], # 'X' must follow 'A' and precede 'Y'
)
assert seq == ['A', 'B', 'X', 'Y', 'C', 'Z']
"""
# we use an OrderedDict to keep elements in order by default
deps = OrderedDict() # {item: elems_before_item}
for iterable in iterables:
prev = None
for index, item in enumerate(iterable):
if not index:
deps.setdefault(item, [])
else:
deps.setdefault(item, []).append(prev)
prev = item
return topological_sort(deps)
try:
import xlwt
# add some sanitization to respect the excel sheet name restrictions
    # as the sheet name is often translatable, we cannot control the input
class PatchedWorkbook(xlwt.Workbook):
def add_sheet(self, name, cell_overwrite_ok=False):
# invalid Excel character: []:*?/\
name = re.sub(r'[\[\]:*?/\\]', '', name)
# maximum size is 31 characters
name = name[:31]
return super(PatchedWorkbook, self).add_sheet(name, cell_overwrite_ok=cell_overwrite_ok)
xlwt.Workbook = PatchedWorkbook
except ImportError:
xlwt = None
try:
import xlsxwriter
# add some sanitization to respect the excel sheet name restrictions
    # as the sheet name is often translatable, we cannot control the input
class PatchedXlsxWorkbook(xlsxwriter.Workbook):
# TODO when xlsxwriter bump to 0.9.8, add worksheet_class=None parameter instead of kw
def add_worksheet(self, name=None, **kw):
if name:
# invalid Excel character: []:*?/\
name = re.sub(r'[\[\]:*?/\\]', '', name)
# maximum size is 31 characters
name = name[:31]
return super(PatchedXlsxWorkbook, self).add_worksheet(name, **kw)
xlsxwriter.Workbook = PatchedXlsxWorkbook
except ImportError:
xlsxwriter = None
def to_xml(s):
return s.replace('&','&').replace('<','<').replace('>','>')
def get_iso_codes(lang):
if lang.find('_') != -1:
if lang.split('_')[0] == lang.split('_')[1].lower():
lang = lang.split('_')[0]
return lang
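# Illustrative examples (assumed values):
#   get_iso_codes('fr_FR')  -> 'fr'
#   get_iso_codes('fr_BE')  -> 'fr_BE'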
def scan_languages():
""" Returns all languages supported by OpenERP for translation
:returns: a list of (lang_code, lang_name) pairs
:rtype: [(str, unicode)]
"""
csvpath = odoo.modules.module.get_resource_path('base', 'data', 'res.lang.csv')
try:
# read (code, name) from languages in base/data/res.lang.csv
with open(csvpath, 'rb') as csvfile:
reader = pycompat.csv_reader(csvfile, delimiter=',', quotechar='"')
fields = next(reader)
code_index = fields.index("code")
name_index = fields.index("name")
result = [
(row[code_index], row[name_index])
for row in reader
]
except Exception:
_logger.error("Could not read %s", csvpath)
result = []
return sorted(result or [('en_US', u'English')], key=itemgetter(1))
def mod10r(number):
"""
Input number : account or invoice number
Output return: the same number completed with the recursive mod10
key
"""
codec=[0,9,4,6,8,2,7,1,3,5]
report = 0
result=""
for digit in number:
result += digit
if digit.isdigit():
report = codec[ (int(digit) + report) % 10 ]
return result + str((10 - report) % 10)
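# Illustrative example (assumed value):
#   mod10r('123')  -> '1236'   (the trailing '6' is the recursive mod10 key)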
def str2bool(s, default=None):
s = ustr(s).lower()
y = 'y yes 1 true t on'.split()
n = 'n no 0 false f off'.split()
if s not in (y + n):
if default is None:
raise ValueError('Use 0/1/yes/no/true/false/on/off')
return bool(default)
return s in y
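# Illustrative examples (assumed values):
#   str2bool('Yes')      -> True
#   str2bool('off')      -> False
#   str2bool('?', False) -> False   (falls back to the given default)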
def human_size(sz):
"""
Return the size in a human readable format
"""
if not sz:
return False
units = ('bytes', 'Kb', 'Mb', 'Gb', 'Tb')
if isinstance(sz, str):
sz=len(sz)
s, i = float(sz), 0
while s >= 1024 and i < len(units)-1:
s /= 1024
i += 1
return "%0.2f %s" % (s, units[i])
def logged(f):
@wraps(f)
def wrapper(*args, **kwargs):
from pprint import pformat
vector = ['Call -> function: %r' % f]
for i, arg in enumerate(args):
vector.append(' arg %02d: %s' % (i, pformat(arg)))
for key, value in kwargs.items():
vector.append(' kwarg %10s: %s' % (key, pformat(value)))
timeb4 = time.time()
res = f(*args, **kwargs)
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
_logger.debug('\n'.join(vector))
return res
return wrapper
class profile(object):
def __init__(self, fname=None):
self.fname = fname
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
profile = cProfile.Profile()
result = profile.runcall(f, *args, **kwargs)
profile.dump_stats(self.fname or ("%s.cprof" % (f.__name__,)))
return result
return wrapper
def detect_ip_addr():
"""Try a very crude method to figure out a valid external
IP or hostname for the current machine. Don't rely on this
    for binding to an interface, but it could be used as a basis
for constructing a remote URL to the server.
"""
def _detect_ip_addr():
from array import array
from struct import pack, unpack
try:
import fcntl
except ImportError:
fcntl = None
ip_addr = None
if not fcntl: # not UNIX:
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
else: # UNIX:
# get all interfaces:
nbytes = 128 * 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array('B', '\0' * nbytes)
#print 'names: ', names
outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
namestr = names.tostring()
# try 64 bit kernel:
for i in range(0, outbytes, 40):
name = namestr[i:i+16].split('\0', 1)[0]
if name != 'lo':
ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
break
# try 32 bit kernel:
if ip_addr is None:
ifaces = [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)]
for ifname in [iface for iface in ifaces if iface if iface != 'lo']:
ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
break
return ip_addr or 'localhost'
try:
ip_addr = _detect_ip_addr()
except Exception:
ip_addr = 'localhost'
return ip_addr
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_TIME_FORMAT)
DATE_LENGTH = len(datetime.date.today().strftime(DEFAULT_SERVER_DATE_FORMAT))
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
DATETIME_FORMATS_MAP = {
'%C': '', # century
'%D': '%m/%d/%Y', # modified %y->%Y
'%e': '%d',
'%E': '', # special modifier
'%F': '%Y-%m-%d',
'%g': '%Y', # modified %y->%Y
'%G': '%Y',
'%h': '%b',
'%k': '%H',
'%l': '%I',
'%n': '\n',
'%O': '', # special modifier
'%P': '%p',
'%R': '%H:%M',
'%r': '%I:%M:%S %p',
'%s': '', #num of seconds since epoch
'%T': '%H:%M:%S',
'%t': ' ', # tab
'%u': ' %w',
'%V': '%W',
'%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
'%+': '%Y-%m-%d %H:%M:%S',
# %Z is a special case that causes 2 problems at least:
# - the timezone names we use (in res_user.context_tz) come
# from pytz, but not all these names are recognized by
# strptime(), so we cannot convert in both directions
# when such a timezone is selected and %Z is in the format
# - %Z is replaced by an empty string in strftime() when
# there is not tzinfo in a datetime value (e.g when the user
# did not pick a context_tz). The resulting string does not
# parse back if the format requires %Z.
# As a consequence, we strip it completely from format strings.
# The user can always have a look at the context_tz in
# preferences to check the timezone.
'%z': '',
'%Z': '',
}
POSIX_TO_LDML = {
'a': 'E',
'A': 'EEEE',
'b': 'MMM',
'B': 'MMMM',
#'c': '',
'd': 'dd',
'H': 'HH',
'I': 'hh',
'j': 'DDD',
'm': 'MM',
'M': 'mm',
'p': 'a',
'S': 'ss',
'U': 'w',
'w': 'e',
'W': 'w',
'y': 'yy',
'Y': 'yyyy',
# see comments above, and babel's format_datetime assumes an UTC timezone
# for naive datetime objects
#'z': 'Z',
#'Z': 'z',
}
def posix_to_ldml(fmt, locale):
""" Converts a posix/strftime pattern into an LDML date format pattern.
:param fmt: non-extended C89/C90 strftime pattern
:param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
:return: unicode
"""
buf = []
pc = False
quoted = []
for c in fmt:
# LDML date format patterns uses letters, so letters must be quoted
if not pc and c.isalpha():
quoted.append(c if c != "'" else "''")
continue
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
quoted = []
if pc:
if c == '%': # escaped percent
buf.append('%')
elif c == 'x': # date format, short seems to match
buf.append(locale.date_formats['short'].pattern)
elif c == 'X': # time format, seems to include seconds. short does not
buf.append(locale.time_formats['medium'].pattern)
else: # look up format char in static mapping
buf.append(POSIX_TO_LDML[c])
pc = False
elif c == '%':
pc = True
else:
buf.append(c)
# flush anything remaining in quoted buffer
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
return ''.join(buf)
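# Illustrative example (assumed values):
#   posix_to_ldml('%Y-%m-%d', locale=babel.Locale.parse('en_US'))  -> 'yyyy-MM-dd'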
def split_every(n, iterable, piece_maker=tuple):
"""Splits an iterable into length-n pieces. The last piece will be shorter
if ``n`` does not evenly divide the iterable length.
:param int n: maximum size of each generated chunk
:param Iterable iterable: iterable to chunk into pieces
:param piece_maker: callable taking an iterable and collecting each
chunk from its slice, *must consume the entire slice*.
"""
iterator = iter(iterable)
piece = piece_maker(islice(iterator, n))
while piece:
yield piece
piece = piece_maker(islice(iterator, n))
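# Illustrative example (assumed values):
#   list(split_every(2, [1, 2, 3, 4, 5]))  -> [(1, 2), (3, 4), (5,)]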
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
""" Read the values of ``field´´ for the given ``ids´´ and group ids by value.
:param string field: name of the field we want to read and group by
:return: mapping of field values to the list of ids that have it
:rtype: dict
"""
res = {}
for record in obj.read(cr, uid, ids, [field], context=context):
key = record[field]
res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
return res
def get_and_group_by_company(cr, uid, obj, ids, context=None):
return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
if len(items) == 1:
attr = items[0]
def g(obj):
return resolve_attr(obj, attr)
else:
def g(obj):
return tuple(resolve_attr(obj, attr) for attr in items)
return g
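# Example (illustrative; ``record`` is a hypothetical object with the shown attributes):
#
#     get_name = attrgetter('partner_id.name')
#     get_name(record)               # same as record.partner_id.name
#     pair = attrgetter('id', 'partner_id.name')
#     pair(record)                   # -> (record.id, record.partner_id.name)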
def discardattr(obj, key):
""" Perform a ``delattr(obj, key)`` but without crashing if ``key`` is not present. """
try:
delattr(obj, key)
except AttributeError:
pass
# ---------------------------------------------
# String management
# ---------------------------------------------
# Inspired by http://stackoverflow.com/questions/517923
def remove_accents(input_str):
"""Suboptimal-but-better-than-nothing way to replace accented
latin letters by an ASCII equivalent. Will obviously change the
meaning of input_str and work only for some cases"""
if not input_str:
return input_str
input_str = ustr(input_str)
nkfd_form = unicodedata.normalize('NFKD', input_str)
return u''.join([c for c in nkfd_form if not unicodedata.combining(c)])
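# Example (illustrative):
#
#     >>> remove_accents('Crème brûlée')
#     'Creme brulee'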
class unquote(str):
"""A subclass of str that implements repr() without enclosing quotation marks
       or escaping, keeping the original string untouched. The name comes from Lisp's unquote.
One of the uses for this is to preserve or insert bare variable names within dicts during eval()
of a dict's repr(). Use with care.
Some examples (notice that there are never quotes surrounding
       the ``active_id`` name):
>>> unquote('active_id')
active_id
>>> d = {'test': unquote('active_id')}
>>> d
{'test': active_id}
       >>> print(d)
{'test': active_id}
"""
def __repr__(self):
return self
class UnquoteEvalContext(defaultdict):
"""Defaultdict-based evaluation context that returns
an ``unquote`` string for any missing name used during
the evaluation.
Mostly useful for evaluating OpenERP domains/contexts that
may refer to names that are unknown at the time of eval,
so that when the context/domain is converted back to a string,
the original names are preserved.
**Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
``safe_eval()`` will shadow the builtins, which may cause other
failures, depending on what is evaluated.
Example (notice that ``section_id`` is preserved in the final
result) :
>>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
>>> eval(context_str, UnquoteEvalContext(uid=1))
{'default_user_id': 1, 'default_section_id': section_id}
"""
def __init__(self, *args, **kwargs):
super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
def __missing__(self, key):
return unquote(key)
class mute_logger(object):
"""Temporary suppress the logging.
Can be used as context manager or decorator.
@mute_logger('odoo.plic.ploc')
def do_stuff():
blahblah()
with mute_logger('odoo.foo.bar'):
            do_stuff()
"""
def __init__(self, *loggers):
self.loggers = loggers
def filter(self, record):
return 0
def __enter__(self):
for logger in self.loggers:
assert isinstance(logger, str),\
"A logger name must be a string, got %s" % type(logger)
logging.getLogger(logger).addFilter(self)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for logger in self.loggers:
logging.getLogger(logger).removeFilter(self)
def __call__(self, func):
@wraps(func)
def deco(*args, **kwargs):
with self:
return func(*args, **kwargs)
return deco
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
``int``, index of the last yielded element in the stream. If the stream
has ended, will give an index 1-past the stream
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
__next__ = next
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-u', '--update', '-i', '--init', '--i18n-overwrite']))
assert all(config.parser.has_option(s) for s in strip_args)
takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
longs, shorts = list(tuple(y) for _, y in itergroupby(strip_args, lambda x: x.startswith('--')))
longs_eq = tuple(l + '=' for l in longs if takes_value[l])
args = sys.argv[:]
def strip(args, i):
return args[i].startswith(shorts) \
or args[i].startswith(longs_eq) or (args[i] in longs) \
or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
return [x for i, x in enumerate(args) if not strip(args, i)]
class ConstantMapping(Mapping):
"""
An immutable mapping returning the provided value for every single key.
    Useful as a default value for methods.
"""
__slots__ = ['_value']
def __init__(self, val):
self._value = val
def __len__(self):
"""
defaultdict updates its length for each individually requested key, is
that really useful?
"""
return 0
def __iter__(self):
"""
same as len, defaultdict updates its iterable keyset with each key
requested, is there a point for this?
"""
return iter([])
def __getitem__(self, item):
return self._value
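# Example (illustrative):
#
#     >>> cm = ConstantMapping(42)
#     >>> cm['anything']
#     42
#     >>> len(cm)
#     0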
def dumpstacks(sig=None, frame=None, thread_idents=None):
""" Signal handler: dump a stack trace for each existing thread or given
thread(s) specified through the ``thread_idents`` sequence.
"""
code = []
def extract_stack(stack):
for filename, lineno, name, line in traceback.extract_stack(stack):
yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
if line:
yield " %s" % (line.strip(),)
# code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
# modified for python 2.5 compatibility
threads_info = {th.ident: {'repr': repr(th),
'uid': getattr(th, 'uid', 'n/a'),
'dbname': getattr(th, 'dbname', 'n/a'),
'url': getattr(th, 'url', 'n/a')}
for th in threading.enumerate()}
for threadId, stack in sys._current_frames().items():
if not thread_idents or threadId in thread_idents:
thread_info = threads_info.get(threadId, {})
code.append("\n# Thread: %s (db:%s) (uid:%s) (url:%s)" %
(thread_info.get('repr', threadId),
thread_info.get('dbname', 'n/a'),
thread_info.get('uid', 'n/a'),
thread_info.get('url', 'n/a')))
for line in extract_stack(stack):
code.append(line)
if odoo.evented:
# code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
import gc
from greenlet import greenlet
for ob in gc.get_objects():
if not isinstance(ob, greenlet) or not ob:
continue
code.append("\n# Greenlet: %r" % (ob,))
for line in extract_stack(ob.gr_frame):
code.append(line)
_logger.info("\n".join(code))
def freehash(arg):
try:
return hash(arg)
except Exception:
if isinstance(arg, Mapping):
return hash(frozendict(arg))
elif isinstance(arg, Iterable):
return hash(frozenset(freehash(item) for item in arg))
else:
return id(arg)
def clean_context(context):
""" This function take a dictionary and remove each entry with its key starting with 'default_' """
return {k: v for k, v in context.items() if not k.startswith('default_')}
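# Example (illustrative):
#
#     >>> clean_context({'default_type': 'out_invoice', 'lang': 'en_US'})
#     {'lang': 'en_US'}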
class frozendict(dict):
""" An implementation of an immutable dictionary. """
__slots__ = ()
def __delitem__(self, key):
raise NotImplementedError("'__delitem__' not supported on frozendict")
def __setitem__(self, key, val):
raise NotImplementedError("'__setitem__' not supported on frozendict")
def clear(self):
raise NotImplementedError("'clear' not supported on frozendict")
def pop(self, key, default=None):
raise NotImplementedError("'pop' not supported on frozendict")
def popitem(self):
raise NotImplementedError("'popitem' not supported on frozendict")
def setdefault(self, key, default=None):
raise NotImplementedError("'setdefault' not supported on frozendict")
def update(self, *args, **kwargs):
raise NotImplementedError("'update' not supported on frozendict")
def __hash__(self):
return hash(frozenset((key, freehash(val)) for key, val in self.items()))
class Collector(dict):
""" A mapping from keys to tuples. This implements a relation, and can be
seen as a space optimization for ``defaultdict(tuple)``.
"""
__slots__ = ()
def __getitem__(self, key):
return self.get(key, ())
def __setitem__(self, key, val):
val = tuple(val)
if val:
super().__setitem__(key, val)
else:
super().pop(key, None)
def add(self, key, val):
vals = self[key]
if val not in vals:
self[key] = vals + (val,)
def discard_keys_and_values(self, excludes):
for key in excludes:
self.pop(key, None)
for key, vals in list(self.items()):
self[key] = tuple(val for val in vals if val not in excludes)
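# Example (illustrative):
#
#     >>> c = Collector()
#     >>> c.add('a', 1)
#     >>> c.add('a', 2)
#     >>> c['a']
#     (1, 2)
#     >>> c['missing']
#     ()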
class StackMap(MutableMapping):
""" A stack of mappings behaving as a single mapping, and used to implement
nested scopes. The lookups search the stack from top to bottom, and
returns the first value found. Mutable operations modify the topmost
mapping only.
"""
__slots__ = ['_maps']
def __init__(self, m=None):
self._maps = [] if m is None else [m]
def __getitem__(self, key):
for mapping in reversed(self._maps):
try:
return mapping[key]
except KeyError:
pass
raise KeyError(key)
def __setitem__(self, key, val):
self._maps[-1][key] = val
def __delitem__(self, key):
del self._maps[-1][key]
def __iter__(self):
return iter({key for mapping in self._maps for key in mapping})
def __len__(self):
return sum(1 for key in self)
def __str__(self):
return u"<StackMap %s>" % self._maps
def pushmap(self, m=None):
self._maps.append({} if m is None else m)
def popmap(self):
return self._maps.pop()
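# Example (illustrative):
#
#     >>> s = StackMap({'x': 1})
#     >>> s.pushmap({'x': 2, 'y': 3})
#     >>> s['x'], s['y']          # lookups go from the topmost map down
#     (2, 3)
#     >>> _ = s.popmap()
#     >>> s['x']
#     1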
class OrderedSet(MutableSet):
""" A set collection that remembers the elements first insertion order. """
__slots__ = ['_map']
def __init__(self, elems=()):
self._map = dict.fromkeys(elems)
def __contains__(self, elem):
return elem in self._map
def __iter__(self):
return iter(self._map)
def __len__(self):
return len(self._map)
def add(self, elem):
self._map[elem] = None
def discard(self, elem):
self._map.pop(elem, None)
def update(self, elems):
self._map.update(zip(elems, itertools.repeat(None)))
def difference_update(self, elems):
for elem in elems:
self.discard(elem)
def __repr__(self):
return f'{type(self).__name__}({list(self)!r})'
class LastOrderedSet(OrderedSet):
""" A set collection that remembers the elements last insertion order. """
def add(self, elem):
OrderedSet.discard(self, elem)
OrderedSet.add(self, elem)
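# Example (illustrative):
#
#     >>> list(OrderedSet([1, 2, 1, 3]))      # first insertion order kept
#     [1, 2, 3]
#     >>> s = LastOrderedSet([1, 2])
#     >>> s.add(1)
#     >>> list(s)                             # re-adding 1 moves it to the end
#     [2, 1]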
class Callbacks:
""" A simple queue of callback functions. Upon run, every function is
called (in addition order), and the queue is emptied.
callbacks = Callbacks()
# add foo
def foo():
print("foo")
callbacks.add(foo)
# add bar
        @callbacks.add
def bar():
print("bar")
# add foo again
callbacks.add(foo)
# call foo(), bar(), foo(), then clear the callback queue
callbacks.run()
The queue also provides a ``data`` dictionary, that may be freely used to
store anything, but is mostly aimed at aggregating data for callbacks. The
dictionary is automatically cleared by ``run()`` once all callback functions
have been called.
# register foo to process aggregated data
@callbacks.add
def foo():
print(sum(callbacks.data['foo']))
callbacks.data.setdefault('foo', []).append(1)
...
callbacks.data.setdefault('foo', []).append(2)
...
callbacks.data.setdefault('foo', []).append(3)
# call foo(), which prints 6
callbacks.run()
Given the global nature of ``data``, the keys should identify in a unique
way the data being stored. It is recommended to use strings with a
structure like ``"{module}.{feature}"``.
"""
__slots__ = ['_funcs', 'data']
def __init__(self):
self._funcs = collections.deque()
self.data = {}
def add(self, func):
""" Add the given function. """
self._funcs.append(func)
def run(self):
""" Call all the functions (in addition order), then clear associated data.
"""
while self._funcs:
func = self._funcs.popleft()
func()
self.clear()
def clear(self):
""" Remove all callbacks and data from self. """
self._funcs.clear()
self.data.clear()
class IterableGenerator:
""" An iterable object based on a generator function, which is called each
time the object is iterated over.
"""
__slots__ = ['func', 'args']
def __init__(self, func, *args):
self.func = func
self.args = args
def __iter__(self):
return self.func(*self.args)
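# Example (illustrative):
#
#     >>> squares = IterableGenerator(lambda n: (i * i for i in range(n)), 4)
#     >>> list(squares)
#     [0, 1, 4, 9]
#     >>> list(squares)       # re-iterable, unlike a bare generator
#     [0, 1, 4, 9]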
def groupby(iterable, key=None):
""" Return a collection of pairs ``(key, elements)`` from ``iterable``. The
``key`` is a function computing a key value for each element. This
function is similar to ``itertools.groupby``, but aggregates all
elements under the same key, not only consecutive elements.
"""
if key is None:
key = lambda arg: arg
groups = defaultdict(list)
for elem in iterable:
groups[key(elem)].append(elem)
return groups.items()
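# Example (illustrative):
#
#     >>> dict(groupby([1, 2, 3, 4, 5], key=lambda n: n % 2))
#     {1: [1, 3, 5], 0: [2, 4]}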
def unique(it):
""" "Uniquifier" for the provided iterable: will output each element of
the iterable once.
    The iterable's elements must be hashable.
:param Iterable it:
:rtype: Iterator
"""
seen = set()
for e in it:
if e not in seen:
seen.add(e)
yield e
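# Example (illustrative):
#
#     >>> list(unique([3, 1, 3, 2, 1]))
#     [3, 1, 2]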
class Reverse(object):
""" Wraps a value and reverses its ordering, useful in key functions when
mixing ascending and descending sort on non-numeric data as the
``reverse`` parameter can not do piecemeal reordering.
"""
__slots__ = ['val']
def __init__(self, val):
self.val = val
def __eq__(self, other): return self.val == other.val
def __ne__(self, other): return self.val != other.val
def __ge__(self, other): return self.val <= other.val
def __gt__(self, other): return self.val < other.val
def __le__(self, other): return self.val >= other.val
def __lt__(self, other): return self.val > other.val
@contextmanager
def ignore(*exc):
try:
yield
except exc:
pass
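# Example (illustrative; assumes ``os`` is imported and ``path`` is a
# hypothetical filename):
#
#     with ignore(FileNotFoundError):
#         os.remove(path)         # no error if the file is already gone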
html_escape = markupsafe.escape
def get_lang(env, lang_code=False):
"""
Retrieve the first lang object installed, by checking the parameter lang_code,
the context and then the company. If no lang is installed from those variables,
fallback on the first lang installed in the system.
:param str lang_code: the locale (i.e. en_US)
:return res.lang: the first lang found that is installed on the system.
"""
langs = [code for code, _ in env['res.lang'].get_installed()]
lang = langs[0]
if lang_code and lang_code in langs:
lang = lang_code
elif env.context.get('lang') in langs:
lang = env.context.get('lang')
elif env.user.company_id.partner_id.lang in langs:
lang = env.user.company_id.partner_id.lang
return env['res.lang']._lang_get(lang)
def babel_locale_parse(lang_code):
try:
return babel.Locale.parse(lang_code)
except:
try:
return babel.Locale.default()
except:
return babel.Locale.parse("en_US")
def formatLang(env, value, digits=None, grouping=True, monetary=False, dp=False, currency_obj=False):
"""
Assuming 'Account' decimal.precision=3:
formatLang(value) -> digits=2 (default)
formatLang(value, digits=4) -> digits=4
formatLang(value, dp='Account') -> digits=3
formatLang(value, digits=5, dp='Account') -> digits=5
"""
if digits is None:
digits = DEFAULT_DIGITS = 2
if dp:
decimal_precision_obj = env['decimal.precision']
digits = decimal_precision_obj.precision_get(dp)
elif currency_obj:
digits = currency_obj.decimal_places
if isinstance(value, str) and not value:
return ''
lang_obj = get_lang(env)
res = lang_obj.format('%.' + str(digits) + 'f', value, grouping=grouping, monetary=monetary)
if currency_obj and currency_obj.symbol:
if currency_obj.position == 'after':
res = '%s%s%s' % (res, NON_BREAKING_SPACE, currency_obj.symbol)
elif currency_obj and currency_obj.position == 'before':
res = '%s%s%s' % (currency_obj.symbol, NON_BREAKING_SPACE, res)
return res
def format_date(env, value, lang_code=False, date_format=False):
'''
Formats the date in a given format.
:param env: an environment.
:param date, datetime or string value: the date to format.
:param string lang_code: the lang code, if not specified it is extracted from the
environment context.
        :param string date_format: the format of the date (LDML format), if not specified the
default format of the lang.
:return: date formatted in the specified format.
:rtype: string
'''
if not value:
return ''
if isinstance(value, str):
if len(value) < DATE_LENGTH:
return ''
if len(value) > DATE_LENGTH:
# a datetime, convert to correct timezone
value = odoo.fields.Datetime.from_string(value)
value = odoo.fields.Datetime.context_timestamp(env['res.lang'], value)
else:
value = odoo.fields.Datetime.from_string(value)
lang = get_lang(env, lang_code)
locale = babel_locale_parse(lang.code)
if not date_format:
date_format = posix_to_ldml(lang.date_format, locale=locale)
return babel.dates.format_date(value, format=date_format, locale=locale)
def parse_date(env, value, lang_code=False):
'''
Parse the date from a given format. If it is not a valid format for the
localization, return the original string.
:param env: an environment.
:param string value: the date to parse.
:param string lang_code: the lang code, if not specified it is extracted from the
environment context.
:return: date object from the localized string
:rtype: datetime.date
'''
lang = get_lang(env, lang_code)
locale = babel_locale_parse(lang.code)
try:
return babel.dates.parse_date(value, locale=locale)
except:
return value
def format_datetime(env, value, tz=False, dt_format='medium', lang_code=False):
""" Formats the datetime in a given format.
:param {str, datetime} value: naive datetime to format either in string or in datetime
:param {str} tz: name of the timezone in which the given datetime should be localized
:param {str} dt_format: one of “full”, “long”, “medium”, or “short”, or a custom date/time pattern compatible with `babel` lib
:param {str} lang_code: ISO code of the language to use to render the given datetime
"""
if not value:
return ''
if isinstance(value, str):
timestamp = odoo.fields.Datetime.from_string(value)
else:
timestamp = value
tz_name = tz or env.user.tz or 'UTC'
utc_datetime = pytz.utc.localize(timestamp, is_dst=False)
try:
context_tz = pytz.timezone(tz_name)
localized_datetime = utc_datetime.astimezone(context_tz)
except Exception:
localized_datetime = utc_datetime
lang = get_lang(env, lang_code)
    locale = babel_locale_parse(lang.code or lang_code)  # lang can be inactive, so `lang` is empty
if not dt_format:
date_format = posix_to_ldml(lang.date_format, locale=locale)
time_format = posix_to_ldml(lang.time_format, locale=locale)
dt_format = '%s %s' % (date_format, time_format)
# Babel allows to format datetime in a specific language without change locale
# So month 1 = January in English, and janvier in French
# Be aware that the default value for format is 'medium', instead of 'short'
# medium: Jan 5, 2016, 10:20:31 PM | 5 janv. 2016 22:20:31
# short: 1/5/16, 10:20 PM | 5/01/16 22:20
# Formatting available here : http://babel.pocoo.org/en/latest/dates.html#date-fields
return babel.dates.format_datetime(localized_datetime, dt_format, locale=locale)
def format_time(env, value, tz=False, time_format='medium', lang_code=False):
""" Format the given time (hour, minute and second) with the current user preference (language, format, ...)
:param value: the time to format
        :type value: `datetime.time` instance. Could be timezoned to display tzinfo according to format (e.g. 'full' format)
        :param tz: name of the timezone in which the given datetime should be localized
        :param time_format: one of “full”, “long”, “medium”, or “short”, or a custom time pattern
        :param lang_code: ISO code of the language to use to render the given time
        :rtype: str
"""
if not value:
return ''
if isinstance(value, datetime.time):
localized_datetime = value
else:
if isinstance(value, str):
value = odoo.fields.Datetime.from_string(value)
tz_name = tz or env.user.tz or 'UTC'
utc_datetime = pytz.utc.localize(value, is_dst=False)
try:
context_tz = pytz.timezone(tz_name)
localized_datetime = utc_datetime.astimezone(context_tz)
except Exception:
localized_datetime = utc_datetime
lang = get_lang(env, lang_code)
locale = babel_locale_parse(lang.code)
if not time_format:
time_format = posix_to_ldml(lang.time_format, locale=locale)
return babel.dates.format_time(localized_datetime, format=time_format, locale=locale)
def _format_time_ago(env, time_delta, lang_code=False, add_direction=True):
if not lang_code:
langs = [code for code, _ in env['res.lang'].get_installed()]
lang_code = env.context['lang'] if env.context.get('lang') in langs else (env.user.company_id.partner_id.lang or langs[0])
locale = babel_locale_parse(lang_code)
return babel.dates.format_timedelta(-time_delta, add_direction=add_direction, locale=locale)
def format_decimalized_number(number, decimal=1):
"""Format a number to display to nearest metrics unit next to it.
Do not display digits if all visible digits are null.
Do not display units higher then "Tera" because most of people don't know what
a "Yotta" is.
>>> format_decimalized_number(123_456.789)
123.5k
>>> format_decimalized_number(123_000.789)
123k
>>> format_decimalized_number(-123_456.789)
-123.5k
>>> format_decimalized_number(0.789)
0.8
"""
for unit in ['', 'k', 'M', 'G']:
if abs(number) < 1000.0:
return "%g%s" % (round(number, decimal), unit)
number /= 1000.0
return "%g%s" % (round(number, decimal), 'T')
def format_decimalized_amount(amount, currency=None):
"""Format a amount to display the currency and also display the metric unit of the amount.
>>> format_decimalized_amount(123_456.789, res.currency("$"))
$123.5k
"""
    formatted_amount = format_decimalized_number(amount)
    if not currency:
        return formatted_amount
    if currency.position == 'before':
        return "%s%s" % (currency.symbol or '', formatted_amount)
    return "%s %s" % (formatted_amount, currency.symbol or '')
def format_amount(env, amount, currency, lang_code=False):
fmt = "%.{0}f".format(currency.decimal_places)
lang = get_lang(env, lang_code)
formatted_amount = lang.format(fmt, currency.round(amount), grouping=True, monetary=True)\
.replace(r' ', u'\N{NO-BREAK SPACE}').replace(r'-', u'-\N{ZERO WIDTH NO-BREAK SPACE}')
pre = post = u''
if currency.position == 'before':
pre = u'{symbol}\N{NO-BREAK SPACE}'.format(symbol=currency.symbol or '')
else:
post = u'\N{NO-BREAK SPACE}{symbol}'.format(symbol=currency.symbol or '')
return u'{pre}{0}{post}'.format(formatted_amount, pre=pre, post=post)
def format_duration(value):
""" Format a float: used to display integral or fractional values as
human-readable time spans (e.g. 1.5 as "01:30").
"""
hours, minutes = divmod(abs(value) * 60, 60)
minutes = round(minutes)
if minutes == 60:
minutes = 0
hours += 1
if value < 0:
return '-%02d:%02d' % (hours, minutes)
return '%02d:%02d' % (hours, minutes)
def _consteq(str1, str2):
""" Constant-time string comparison. Suitable to compare bytestrings of fixed,
known length only, because length difference is optimized. """
return len(str1) == len(str2) and sum(ord(x)^ord(y) for x, y in zip(str1, str2)) == 0
consteq = getattr(passlib.utils, 'consteq', _consteq)
# forbid globals entirely: str/unicode, int/long, float, bool, tuple, list, dict, None
class Unpickler(pickle_.Unpickler, object):
find_global = None # Python 2
find_class = None # Python 3
def _pickle_load(stream, encoding='ASCII', errors=False):
if sys.version_info[0] == 3:
unpickler = Unpickler(stream, encoding=encoding)
else:
unpickler = Unpickler(stream)
try:
return unpickler.load()
except Exception:
_logger.warning('Failed unpickling data, returning default: %r',
errors, exc_info=True)
return errors
pickle = types.ModuleType(__name__ + '.pickle')
pickle.load = _pickle_load
pickle.loads = lambda text, encoding='ASCII': _pickle_load(io.BytesIO(text), encoding=encoding)
pickle.dump = pickle_.dump
pickle.dumps = pickle_.dumps
class DotDict(dict):
"""Helper for dot.notation access to dictionary attributes
E.g.
foo = DotDict({'bar': False})
return foo.bar
"""
def __getattr__(self, attrib):
val = self.get(attrib)
return DotDict(val) if type(val) is dict else val
def get_diff(data_from, data_to, custom_style=False):
"""
Return, in an HTML table, the diff between two texts.
:param tuple data_from: tuple(text, name), name will be used as table header
:param tuple data_to: tuple(text, name), name will be used as table header
    :param str custom_style: CSS style, including the <style> tag.
:return: a string containing the diff in an HTML table format.
"""
def handle_style(html_diff, custom_style):
""" The HtmlDiff lib will add some useful classes on the DOM to
identify elements. Simply append to those classes some BS4 ones.
For the table to fit the modal width, some custom style is needed.
"""
to_append = {
'diff_header': 'bg-600 text-center align-top px-2',
'diff_next': 'd-none',
'diff_add': 'bg-success',
'diff_chg': 'bg-warning',
'diff_sub': 'bg-danger',
}
for old, new in to_append.items():
html_diff = html_diff.replace(old, "%s %s" % (old, new))
html_diff = html_diff.replace('nowrap', '')
html_diff += custom_style or '''
<style>
table.diff { width: 100%; }
table.diff th.diff_header { width: 50%; }
table.diff td.diff_header { white-space: nowrap; }
table.diff td { word-break: break-all; }
</style>
'''
return html_diff
diff = HtmlDiff(tabsize=2).make_table(
data_from[0].splitlines(),
data_to[0].splitlines(),
data_from[1],
data_to[1],
context=True, # Show only diff lines, not all the code
numlines=3,
)
return handle_style(diff, custom_style)
def traverse_containers(val, type_):
""" Yields atoms filtered by specified type_ (or type tuple), traverses
through standard containers (non-string mappings or sequences) *unless*
they're selected by the type filter
"""
from odoo.models import BaseModel
if isinstance(val, type_):
yield val
elif isinstance(val, (str, bytes, BaseModel)):
return
elif isinstance(val, Mapping):
for k, v in val.items():
yield from traverse_containers(k, type_)
yield from traverse_containers(v, type_)
elif isinstance(val, collections.abc.Sequence):
for v in val:
yield from traverse_containers(v, type_)
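# Example (illustrative):
#
#     >>> list(traverse_containers({'a': [1, 'x', 2.5]}, int))
#     [1]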
def hmac(env, scope, message, hash_function=hashlib.sha256):
"""Compute HMAC with `database.secret` config parameter as key.
:param env: sudo environment to use for retrieving config parameter
:param message: message to authenticate
:param scope: scope of the authentication, to have different signature for the same
message in different usage
:param hash_function: hash function to use for HMAC (default: SHA-256)
"""
if not scope:
raise ValueError('Non-empty scope required')
secret = env['ir.config_parameter'].get_param('database.secret')
message = repr((scope, message))
return hmac_lib.new(
secret.encode(),
message.encode(),
hash_function,
).hexdigest()
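# Usage sketch (illustrative only; ``env`` is assumed to be a sudo environment,
# the scope string and ``record`` are made-up examples):
#
#     token = hmac(env, 'portal.payment.token', (record.id, record.partner_id.id))
#     # later, verify the received value with consteq(token, received_token)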
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import reprlib
shortener = reprlib.Repr()
shortener.maxstring = 150
shorten = shortener.repr
class Speedscope:
def __init__(self, name='Speedscope', init_stack_trace=None):
self.init_stack_trace = init_stack_trace or []
self.init_stack_trace_level = len(self.init_stack_trace)
self.caller_frame = None
self.convert_stack(self.init_stack_trace)
self.init_caller_frame = None
if self.init_stack_trace:
self.init_caller_frame = self.init_stack_trace[-1]
self.profiles_raw = {}
self.name = name
self.frames_indexes = {}
self.frame_count = 0
self.profiles = []
def add(self, key, profile):
for entry in profile:
self.caller_frame = self.init_caller_frame
self.convert_stack(entry['stack'] or [])
if 'query' in entry:
query = entry['query']
full_query = entry['full_query']
entry['stack'].append((f'sql({shorten(query)})', full_query, None))
self.profiles_raw[key] = profile
def convert_stack(self, stack):
for index, frame in enumerate(stack):
method = frame[2]
line = ''
number = ''
if self.caller_frame and len(self.caller_frame) == 4:
line = f"called at {self.caller_frame[0]} ({self.caller_frame[3].strip()})"
number = self.caller_frame[1]
stack[index] = (method, line, number,)
self.caller_frame = frame
def add_output(self, names, complete=True, display_name=None, use_context=True, **params):
entries = []
display_name = display_name or ','.join(names)
for name in names:
entries += self.profiles_raw[name]
entries.sort(key=lambda e: e['start'])
result = self.process(entries, use_context=use_context, **params)
if not result:
return self
start = result[0]['at']
end = result[-1]['at']
if complete:
start_stack = []
end_stack = []
init_stack_trace_ids = self.stack_to_ids(self.init_stack_trace, use_context and entries[0].get('exec_context'))
for frame_id in init_stack_trace_ids:
start_stack.append({
"type": "O",
"frame": frame_id,
"at": start
})
for frame_id in reversed(init_stack_trace_ids):
end_stack.append({
"type": "C",
"frame": frame_id,
"at": end
})
result = start_stack + result + end_stack
self.profiles.append({
"name": display_name,
"type": "evented",
"unit": "seconds",
"startValue": 0,
"endValue": end - start,
"events": result
})
return self
def add_default(self):
if len(self.profiles_raw) > 1:
self.add_output(self.profiles_raw, display_name='Combined')
self.add_output(self.profiles_raw, display_name='Combined no context', use_context=False)
for key, profile in self.profiles_raw.items():
sql = profile and profile[0].get('query')
if sql:
self.add_output([key], hide_gaps=True, display_name=f'{key} (no gap)')
self.add_output([key], continuous=False, complete=False, display_name=f'{key} (density)')
else:
self.add_output([key], display_name=key)
return self
def make(self):
if not self.profiles:
self.add_default()
return {
"name": self.name,
"activeProfileIndex": 0,
"$schema": "https://www.speedscope.app/file-format-schema.json",
"shared": {
"frames": [{
"name": frame[0],
"file": frame[1],
"line": frame[2]
} for frame in self.frames_indexes]
},
"profiles": self.profiles,
}
def get_frame_id(self, frame):
if frame not in self.frames_indexes:
self.frames_indexes[frame] = self.frame_count
self.frame_count += 1
return self.frames_indexes[frame]
def stack_to_ids(self, stack, context, stack_offset=0):
"""
        :param stack: a list of hashable frames
        :param context: an iterable of (level, value) ordered by level
        :param stack_offset: offset level for the stack
Assemble stack and context and return a list of ids representing
this stack, adding each corresponding context at the corresponding
level.
"""
stack_ids = []
context_iterator = iter(context or ())
context_level, context_value = next(context_iterator, (None, None))
# consume iterator until we are over stack_offset
while context_level is not None and context_level < stack_offset:
context_level, context_value = next(context_iterator, (None, None))
for level, frame in enumerate(stack, start=stack_offset + 1):
while context_level == level:
context_frame = (", ".join(f"{k}={v}" for k, v in context_value.items()), '', '')
stack_ids.append(self.get_frame_id(context_frame))
context_level, context_value = next(context_iterator, (None, None))
stack_ids.append(self.get_frame_id(frame))
return stack_ids
def process(self, entries, continuous=True, hide_gaps=False, use_context=True, constant_time=False):
        # the constant_time parameter is mainly useful to hide temporality when focusing on SQL determinism
entry_end = previous_end = None
if not entries:
return []
events = []
current_stack_ids = []
frames_start = entries[0]['start']
# add last closing entry if missing
last_entry = entries[-1]
if last_entry['stack']:
entries.append({'stack': [], 'start': last_entry['start'] + last_entry.get('time', 0)})
for index, entry in enumerate(entries):
if constant_time:
entry_start = close_time = index
else:
previous_end = entry_end
if hide_gaps and previous_end:
entry_start = previous_end
else:
entry_start = entry['start'] - frames_start
if previous_end and previous_end > entry_start:
# skip entry if entry starts after another entry end
continue
if previous_end:
close_time = min(entry_start, previous_end)
else:
close_time = entry_start
entry_time = entry.get('time')
entry_end = None if entry_time is None else entry_start + entry_time
entry_stack_ids = self.stack_to_ids(
entry['stack'] or [],
use_context and entry.get('exec_context'),
self.init_stack_trace_level
)
level = 0
if continuous:
level = -1
for level, at_level in enumerate(zip(current_stack_ids, entry_stack_ids)):
current, new = at_level
if current != new:
break
else:
level += 1
for frame in reversed(current_stack_ids[level:]):
events.append({
"type": "C",
"frame": frame,
"at": close_time
})
for frame in entry_stack_ids[level:]:
events.append({
"type": "O",
"frame": frame,
"at": entry_start
})
current_stack_ids = entry_stack_ids
return events
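# Usage sketch (illustrative; ``entries`` is assumed to follow the layout used
# above: dicts with 'start', optional 'time', 'stack' and optional 'query'):
#
#     sp = Speedscope(name='request profile')
#     sp.add('traces', entries)
#     speedscope_json = sp.make()   # dict in the speedscope file format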
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import codecs
import fnmatch
import functools
import inspect
import io
import locale
import logging
import os
import polib
import re
import tarfile
import tempfile
import threading
from collections import defaultdict, namedtuple
from datetime import datetime
from os.path import join
from pathlib import Path
from babel.messages import extract
from lxml import etree, html
import odoo
from . import config, pycompat
from .misc import file_open, get_iso_codes, SKIPPED_ELEMENT_TYPES
_logger = logging.getLogger(__name__)
# used to notify web client that these translations should be loaded in the UI
WEB_TRANSLATION_COMMENT = "openerp-web"
SKIPPED_ELEMENTS = ('script', 'style', 'title')
_LOCALE2WIN32 = {
'af_ZA': 'Afrikaans_South Africa',
'sq_AL': 'Albanian_Albania',
'ar_SA': 'Arabic_Saudi Arabia',
'eu_ES': 'Basque_Spain',
'be_BY': 'Belarusian_Belarus',
'bs_BA': 'Bosnian_Bosnia and Herzegovina',
'bg_BG': 'Bulgarian_Bulgaria',
'ca_ES': 'Catalan_Spain',
'hr_HR': 'Croatian_Croatia',
'zh_CN': 'Chinese_China',
'zh_TW': 'Chinese_Taiwan',
'cs_CZ': 'Czech_Czech Republic',
'da_DK': 'Danish_Denmark',
'nl_NL': 'Dutch_Netherlands',
'et_EE': 'Estonian_Estonia',
'fa_IR': 'Farsi_Iran',
'ph_PH': 'Filipino_Philippines',
'fi_FI': 'Finnish_Finland',
'fr_FR': 'French_France',
'fr_BE': 'French_France',
'fr_CH': 'French_France',
'fr_CA': 'French_France',
'ga': 'Scottish Gaelic',
'gl_ES': 'Galician_Spain',
'ka_GE': 'Georgian_Georgia',
'de_DE': 'German_Germany',
'el_GR': 'Greek_Greece',
'gu': 'Gujarati_India',
'he_IL': 'Hebrew_Israel',
'hi_IN': 'Hindi',
'hu': 'Hungarian_Hungary',
'is_IS': 'Icelandic_Iceland',
'id_ID': 'Indonesian_Indonesia',
'it_IT': 'Italian_Italy',
'ja_JP': 'Japanese_Japan',
'kn_IN': 'Kannada',
'km_KH': 'Khmer',
'ko_KR': 'Korean_Korea',
'lo_LA': 'Lao_Laos',
'lt_LT': 'Lithuanian_Lithuania',
'lat': 'Latvian_Latvia',
'ml_IN': 'Malayalam_India',
'mi_NZ': 'Maori',
'mn': 'Cyrillic_Mongolian',
'no_NO': 'Norwegian_Norway',
'nn_NO': 'Norwegian-Nynorsk_Norway',
'pl': 'Polish_Poland',
'pt_PT': 'Portuguese_Portugal',
'pt_BR': 'Portuguese_Brazil',
'ro_RO': 'Romanian_Romania',
'ru_RU': 'Russian_Russia',
'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
'sk_SK': 'Slovak_Slovakia',
'sl_SI': 'Slovenian_Slovenia',
#should find more specific locales for Spanish countries,
#but better than nothing
'es_AR': 'Spanish_Spain',
'es_BO': 'Spanish_Spain',
'es_CL': 'Spanish_Spain',
'es_CO': 'Spanish_Spain',
'es_CR': 'Spanish_Spain',
'es_DO': 'Spanish_Spain',
'es_EC': 'Spanish_Spain',
'es_ES': 'Spanish_Spain',
'es_GT': 'Spanish_Spain',
'es_HN': 'Spanish_Spain',
'es_MX': 'Spanish_Spain',
'es_NI': 'Spanish_Spain',
'es_PA': 'Spanish_Spain',
'es_PE': 'Spanish_Spain',
'es_PR': 'Spanish_Spain',
'es_PY': 'Spanish_Spain',
'es_SV': 'Spanish_Spain',
'es_UY': 'Spanish_Spain',
'es_VE': 'Spanish_Spain',
'sv_SE': 'Swedish_Sweden',
'ta_IN': 'English_Australia',
'th_TH': 'Thai_Thailand',
'tr_TR': 'Turkish_Turkey',
'uk_UA': 'Ukrainian_Ukraine',
'vi_VN': 'Vietnamese_Viet Nam',
'tlh_TLH': 'Klingon',
}
# These are not all English small words, just those that could potentially be isolated within views
ENGLISH_SMALL_WORDS = set("as at by do go if in me no of ok on or to up us we".split())
# these direct uses of CSV are ok.
import csv # pylint: disable=deprecated-module
class UNIX_LINE_TERMINATOR(csv.excel):
lineterminator = '\n'
csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)
# FIXME: this whole module needs a thorough cleanup; it is a mess
def encode(s):
assert isinstance(s, str)
return s
# which elements are translated inline
TRANSLATED_ELEMENTS = {
'abbr', 'b', 'bdi', 'bdo', 'br', 'cite', 'code', 'data', 'del', 'dfn', 'em',
'font', 'i', 'ins', 'kbd', 'keygen', 'mark', 'math', 'meter', 'output',
'progress', 'q', 'ruby', 's', 'samp', 'small', 'span', 'strong', 'sub',
'sup', 'time', 'u', 'var', 'wbr', 'text', 'select', 'option',
}
# Which attributes must be translated. This is a dict, where the value indicates
# a condition for a node to have the attribute translatable.
TRANSLATED_ATTRS = dict.fromkeys({
'string', 'add-label', 'help', 'sum', 'avg', 'confirm', 'placeholder', 'alt', 'title', 'aria-label',
'aria-keyshortcuts', 'aria-placeholder', 'aria-roledescription', 'aria-valuetext',
'value_label', 'data-tooltip',
}, lambda e: True)
def translate_attrib_value(node):
# check if the value attribute of a node must be translated
classes = node.attrib.get('class', '').split(' ')
return (
(node.tag == 'input' and node.attrib.get('type', 'text') == 'text')
and 'datetimepicker-input' not in classes
or (node.tag == 'input' and node.attrib.get('type') == 'hidden')
and 'o_translatable_input_hidden' in classes
)
TRANSLATED_ATTRS.update(
value=translate_attrib_value,
text=lambda e: (e.tag == 'field' and e.attrib.get('widget', '') == 'url'),
**{f't-attf-{attr}': cond for attr, cond in TRANSLATED_ATTRS.items()},
)
avoid_pattern = re.compile(r"\s*<!DOCTYPE", re.IGNORECASE | re.MULTILINE | re.UNICODE)
node_pattern = re.compile(r"<[^>]*>(.*)</[^<]*>", re.DOTALL | re.MULTILINE | re.UNICODE)
def translate_xml_node(node, callback, parse, serialize):
""" Return the translation of the given XML/HTML node.
:param callback: callback(text) returns translated text or None
:param parse: parse(text) returns a node (text is unicode)
:param serialize: serialize(node) returns unicode text
"""
def nonspace(text):
""" Return whether ``text`` is a string with non-space characters. """
return bool(text) and not text.isspace()
def translatable(node):
""" Return whether the given node can be translated as a whole. """
return (
node.tag in TRANSLATED_ELEMENTS
and not any(key.startswith("t-") for key in node.attrib)
and all(translatable(child) for child in node)
)
def hastext(node, pos=0):
""" Return whether the given node contains some text to translate at the
given child node position. The text may be before the child node,
inside it, or after it.
"""
return (
# there is some text before node[pos]
nonspace(node[pos-1].tail if pos else node.text)
or (
pos < len(node)
and translatable(node[pos])
and (
any( # attribute to translate
val and key in TRANSLATED_ATTRS and TRANSLATED_ATTRS[key](node[pos])
for key, val in node[pos].attrib.items()
)
# node[pos] contains some text to translate
or hastext(node[pos])
# node[pos] has no text, but there is some text after it
or hastext(node, pos + 1)
)
)
)
def process(node):
""" Translate the given node. """
if (
isinstance(node, SKIPPED_ELEMENT_TYPES)
or node.tag in SKIPPED_ELEMENTS
or node.get('t-translation', "").strip() == "off"
or node.tag == 'attribute' and node.get('name') not in TRANSLATED_ATTRS
or node.getparent() is None and avoid_pattern.match(node.text or "")
):
return
pos = 0
while True:
# check for some text to translate at the given position
if hastext(node, pos):
# move all translatable children nodes from the given position
# into a <div> element
div = etree.Element('div')
div.text = (node[pos-1].tail if pos else node.text) or ''
while pos < len(node) and translatable(node[pos]):
div.append(node[pos])
# translate the content of the <div> element as a whole
content = serialize(div)[5:-6]
original = content.strip()
translated = callback(original)
if translated:
result = content.replace(original, translated)
div = parse_html(f"<div>{result}</div>")
if pos:
node[pos-1].tail = div.text
else:
node.text = div.text
# move the content of the <div> element back inside node
while len(div) > 0:
node.insert(pos, div[0])
pos += 1
if pos >= len(node):
break
# node[pos] is not translatable as a whole, process it recursively
process(node[pos])
pos += 1
# translate the attributes of the node
for key, val in node.attrib.items():
if nonspace(val) and key in TRANSLATED_ATTRS and TRANSLATED_ATTRS[key](node):
node.set(key, callback(val.strip()) or val)
process(node)
return node
def parse_xml(text):
return etree.fromstring(text)
def serialize_xml(node):
return etree.tostring(node, method='xml', encoding='unicode')
_HTML_PARSER = etree.HTMLParser(encoding='utf8')
def parse_html(text):
return html.fragment_fromstring(text, parser=_HTML_PARSER)
def serialize_html(node):
return etree.tostring(node, method='html', encoding='unicode')
def xml_translate(callback, value):
""" Translate an XML value (string), using `callback` for translating text
appearing in `value`.
"""
if not value:
return value
try:
root = parse_xml(value)
result = translate_xml_node(root, callback, parse_xml, serialize_xml)
return serialize_xml(result)
except etree.ParseError:
# fallback for translated terms: use an HTML parser and wrap the term
root = parse_html(u"<div>%s</div>" % value)
result = translate_xml_node(root, callback, parse_xml, serialize_xml)
# remove tags <div> and </div> from result
return serialize_xml(result)[5:-6]
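# Example (illustrative; the callback is a toy dict lookup instead of the real
# ir.translation machinery):
#
#     terms = {'Hello <b>world</b>': 'Bonjour <b>monde</b>'}
#     xml_translate(terms.get, '<form><p>Hello <b>world</b></p></form>')
#     # -> '<form><p>Bonjour <b>monde</b></p></form>'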
def html_translate(callback, value):
""" Translate an HTML value (string), using `callback` for translating text
appearing in `value`.
"""
if not value:
return value
try:
# value may be some HTML fragment, wrap it into a div
root = parse_html("<div>%s</div>" % value)
result = translate_xml_node(root, callback, parse_html, serialize_html)
# remove tags <div> and </div> from result
value = serialize_html(result)[5:-6]
except ValueError:
_logger.exception("Cannot translate malformed HTML, using source value instead")
return value
#
# Warning: better use self.env['ir.translation']._get_source if you can
#
def translate(cr, name, source_type, lang, source=None):
if source and name:
cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s and src=%s and md5(src)=md5(%s)', (lang, source_type, str(name), source, source))
elif name:
cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s', (lang, source_type, str(name)))
elif source:
cr.execute('select value from ir_translation where lang=%s and type=%s and src=%s and md5(src)=md5(%s)', (lang, source_type, source, source))
res_trans = cr.fetchone()
res = res_trans and res_trans[0] or False
return res
def translate_sql_constraint(cr, key, lang):
cr.execute("""
SELECT COALESCE(t.value, c.message) as message
FROM ir_model_constraint c
LEFT JOIN
(SELECT res_id, value FROM ir_translation
WHERE type='model'
AND name='ir.model.constraint,message'
AND lang=%s
AND value!='') AS t
ON c.id=t.res_id
WHERE name=%s and type='u'
""", (lang, key))
return cr.fetchone()[0]
class GettextAlias(object):
def _get_db(self):
# find current DB based on thread/worker db name (see netsvc)
db_name = getattr(threading.current_thread(), 'dbname', None)
if db_name:
return odoo.sql_db.db_connect(db_name)
def _get_cr(self, frame, allow_create=True):
# try, in order: cr, cursor, self.env.cr, self.cr,
# request.env.cr
if 'cr' in frame.f_locals:
return frame.f_locals['cr'], False
if 'cursor' in frame.f_locals:
return frame.f_locals['cursor'], False
s = frame.f_locals.get('self')
if hasattr(s, 'env'):
return s.env.cr, False
if hasattr(s, 'cr'):
return s.cr, False
try:
from odoo.http import request
return request.env.cr, False
except RuntimeError:
pass
if allow_create:
# create a new cursor
db = self._get_db()
if db is not None:
return db.cursor(), True
return None, False
def _get_uid(self, frame):
# try, in order: uid, user, self.env.uid
if 'uid' in frame.f_locals:
return frame.f_locals['uid']
if 'user' in frame.f_locals:
return int(frame.f_locals['user']) # user may be a record
s = frame.f_locals.get('self')
return s.env.uid
def _get_lang(self, frame):
# try, in order: context.get('lang'), kwargs['context'].get('lang'),
# self.env.lang, self.localcontext.get('lang'), request.env.lang
lang = None
if frame.f_locals.get('context'):
lang = frame.f_locals['context'].get('lang')
if not lang:
kwargs = frame.f_locals.get('kwargs', {})
if kwargs.get('context'):
lang = kwargs['context'].get('lang')
if not lang:
s = frame.f_locals.get('self')
if hasattr(s, 'env'):
lang = s.env.lang
if not lang:
if hasattr(s, 'localcontext'):
lang = s.localcontext.get('lang')
if not lang:
try:
from odoo.http import request
lang = request.env.lang
except RuntimeError:
pass
if not lang:
# Last resort: attempt to guess the language of the user
# Pitfall: some operations are performed in sudo mode, and we
# don't know the original uid, so the language may
# be wrong when the admin language differs.
(cr, dummy) = self._get_cr(frame, allow_create=False)
uid = self._get_uid(frame)
if cr and uid:
env = odoo.api.Environment(cr, uid, {})
lang = env['res.users'].context_get()['lang']
return lang
def __call__(self, source, *args, **kwargs):
translation = self._get_translation(source)
assert not (args and kwargs)
if args or kwargs:
try:
return translation % (args or kwargs)
except (TypeError, ValueError, KeyError):
bad = translation
# fallback: apply to source before logging exception (in case source fails)
translation = source % (args or kwargs)
_logger.exception('Bad translation %r for string %r', bad, source)
return translation
def _get_translation(self, source):
res = source
cr = None
is_new_cr = False
try:
frame = inspect.currentframe()
if frame is None:
return source
frame = frame.f_back
if not frame:
return source
frame = frame.f_back
if not frame:
return source
lang = self._get_lang(frame)
if lang:
cr, is_new_cr = self._get_cr(frame)
if cr:
# Try to use ir.translation to benefit from global cache if possible
env = odoo.api.Environment(cr, odoo.SUPERUSER_ID, {})
res = env['ir.translation']._get_source(None, ('code',), lang, source)
else:
_logger.debug('no context cursor detected, skipping translation for "%r"', source)
else:
_logger.debug('no translation language detected, skipping translation for "%r" ', source)
except Exception:
_logger.debug('translation went wrong for "%r", skipped', source)
# if so, double-check the root/base translations filenames
finally:
if cr and is_new_cr:
cr.close()
return res or ''
@functools.total_ordering
class _lt:
""" Lazy code translation
Similar to GettextAlias but the translation lookup will be done only at
__str__ execution.
A code using translated global variables such as:
LABEL = _lt("User")
def _compute_label(self):
context = {'lang': self.partner_id.lang}
self.user_label = LABEL
works as expected (unlike the classic GettextAlias implementation).
"""
__slots__ = ['_source', '_args']
def __init__(self, source, *args, **kwargs):
self._source = source
assert not (args and kwargs)
self._args = args or kwargs
def __str__(self):
# Call _._get_translation() like _() does, so that we have the same number
# of stack frames calling _get_translation()
translation = _._get_translation(self._source)
if self._args:
try:
return translation % self._args
except (TypeError, ValueError, KeyError):
bad = translation
# fallback: apply to source before logging exception (in case source fails)
translation = self._source % self._args
_logger.exception('Bad translation %r for string %r', bad, self._source)
return translation
def __eq__(self, other):
""" Prevent using equal operators
Prevent direct comparisons with ``self``.
One should compare the translation of ``self._source`` as ``str(self) == X``.
"""
raise NotImplementedError()
def __lt__(self, other):
raise NotImplementedError()
def __add__(self, other):
# Call _._get_translation() like _() does, so that we have the same number
# of stack frames calling _get_translation()
if isinstance(other, str):
return _._get_translation(self._source) + other
elif isinstance(other, _lt):
return _._get_translation(self._source) + _._get_translation(other._source)
return NotImplemented
def __radd__(self, other):
# Call _._get_translation() like _() does, so that we have the same number
# of stack frames calling _get_translation()
if isinstance(other, str):
return other + _._get_translation(self._source)
return NotImplemented
_ = GettextAlias()
def quote(s):
"""Returns quoted PO term string, with special PO characters escaped"""
assert r"\n" not in s, "Translation terms may not include escaped newlines ('\\n'), please use only literal newlines! (in '%s')" % s
return '"%s"' % s.replace('\\','\\\\') \
.replace('"','\\"') \
.replace('\n', '\\n"\n"')
re_escaped_char = re.compile(r"(\\.)")
re_escaped_replacements = {'n': '\n', 't': '\t',}
def _sub_replacement(match_obj):
return re_escaped_replacements.get(match_obj.group(1)[1], match_obj.group(1)[1])
def unquote(str):
"""Returns unquoted PO term string, with special PO characters unescaped"""
return re_escaped_char.sub(_sub_replacement, str[1:-1])
def TranslationFileReader(source, fileformat='po'):
""" Iterate over translation file to return Odoo translation entries """
if fileformat == 'csv':
return CSVFileReader(source)
if fileformat == 'po':
return PoFileReader(source)
_logger.info('Bad file format: %s', fileformat)
raise Exception(_('Bad file format: %s', fileformat))
class CSVFileReader:
def __init__(self, source):
_reader = codecs.getreader('utf-8')
self.source = csv.DictReader(_reader(source), quotechar='"', delimiter=',')
self.prev_code_src = ""
def __iter__(self):
for entry in self.source:
# determine <module>.<imd_name> from res_id
if entry["res_id"] and entry["res_id"].isnumeric():
# res_id is an id or line number
entry["res_id"] = int(entry["res_id"])
elif not entry.get("imd_name"):
# res_id is an external id and must follow <module>.<name>
entry["module"], entry["imd_name"] = entry["res_id"].split(".")
entry["res_id"] = None
if entry["type"] == "model" or entry["type"] == "model_terms":
entry["imd_model"] = entry["name"].partition(',')[0]
if entry["type"] == "code":
if entry["src"] == self.prev_code_src:
                    # skip entry due to unicity constraint on code translations
continue
self.prev_code_src = entry["src"]
yield entry
class PoFileReader:
""" Iterate over po file to return Odoo translation entries """
def __init__(self, source):
def get_pot_path(source_name):
            # when fileobj is a TemporaryFile, its name is an int in Python 3, a string in Python 2
if isinstance(source_name, str) and source_name.endswith('.po'):
# Normally the path looks like /path/to/xxx/i18n/lang.po
# and we try to find the corresponding
# /path/to/xxx/i18n/xxx.pot file.
# (Sometimes we have 'i18n_extra' instead of just 'i18n')
path = Path(source_name)
filename = path.parent.parent.name + '.pot'
pot_path = path.with_name(filename)
return pot_path.exists() and str(pot_path) or False
return False
# polib accepts a path or the file content as a string, not a fileobj
if isinstance(source, str):
self.pofile = polib.pofile(source)
pot_path = get_pot_path(source)
else:
# either a BufferedIOBase or result from NamedTemporaryFile
self.pofile = polib.pofile(source.read().decode())
pot_path = get_pot_path(source.name)
if pot_path:
# Make a reader for the POT file
# (Because the POT comments are correct on GitHub but the
# PO comments tends to be outdated. See LP bug 933496.)
self.pofile.merge(polib.pofile(pot_path))
def __iter__(self):
for entry in self.pofile:
if entry.obsolete:
continue
            # in case of multiple modules, keep only the first
match = re.match(r"(module[s]?): (\w+)", entry.comment)
_, module = match.groups()
comments = "\n".join([c for c in entry.comment.split('\n') if not c.startswith('module:')])
source = entry.msgid
translation = entry.msgstr
found_code_occurrence = False
for occurrence, line_number in entry.occurrences:
match = re.match(r'(model|model_terms):([\w.]+),([\w]+):(\w+)\.([^ ]+)', occurrence)
if match:
type, model_name, field_name, module, xmlid = match.groups()
yield {
'type': type,
'imd_model': model_name,
'name': model_name+','+field_name,
'imd_name': xmlid,
'res_id': None,
'src': source,
'value': translation,
'comments': comments,
'module': module,
}
continue
match = re.match(r'(code):([\w/.]+)', occurrence)
if match:
type, name = match.groups()
if found_code_occurrence:
                        # unicity constraint on code translations
continue
found_code_occurrence = True
yield {
'type': type,
'name': name,
'src': source,
'value': translation,
'comments': comments,
'res_id': int(line_number),
'module': module,
}
continue
match = re.match(r'(selection):([\w.]+),([\w]+)', occurrence)
if match:
_logger.info("Skipped deprecated occurrence %s", occurrence)
continue
match = re.match(r'(sql_constraint|constraint):([\w.]+)', occurrence)
if match:
_logger.info("Skipped deprecated occurrence %s", occurrence)
continue
_logger.error("malformed po file: unknown occurrence: %s", occurrence)
def TranslationFileWriter(target, fileformat='po', lang=None):
""" Iterate over translation file to return Odoo translation entries """
if fileformat == 'csv':
return CSVFileWriter(target)
if fileformat == 'po':
return PoFileWriter(target, lang=lang)
if fileformat == 'tgz':
return TarFileWriter(target, lang=lang)
raise Exception(_('Unrecognized extension: must be one of '
'.csv, .po, or .tgz (received .%s).') % fileformat)
class CSVFileWriter:
def __init__(self, target):
self.writer = pycompat.csv_writer(target, dialect='UNIX')
# write header first
self.writer.writerow(("module","type","name","res_id","src","value","comments"))
def write_rows(self, rows):
for module, type, name, res_id, src, trad, comments in rows:
comments = '\n'.join(comments)
self.writer.writerow((module, type, name, res_id, src, trad, comments))
class PoFileWriter:
""" Iterate over po file to return Odoo translation entries """
def __init__(self, target, lang):
self.buffer = target
self.lang = lang
self.po = polib.POFile()
def write_rows(self, rows):
# we now group the translations by source. That means one translation per source.
grouped_rows = {}
modules = set([])
for module, type, name, res_id, src, trad, comments in rows:
row = grouped_rows.setdefault(src, {})
row.setdefault('modules', set()).add(module)
if not row.get('translation') and trad != src:
row['translation'] = trad
row.setdefault('tnrs', []).append((type, name, res_id))
row.setdefault('comments', set()).update(comments)
modules.add(module)
for src, row in sorted(grouped_rows.items()):
if not self.lang:
# translation template, so no translation value
row['translation'] = ''
elif not row.get('translation'):
row['translation'] = ''
self.add_entry(row['modules'], sorted(row['tnrs']), src, row['translation'], row['comments'])
import odoo.release as release
self.po.header = "Translation of %s.\n" \
"This file contains the translation of the following modules:\n" \
"%s" % (release.description, ''.join("\t* %s\n" % m for m in modules))
now = datetime.utcnow().strftime('%Y-%m-%d %H:%M+0000')
self.po.metadata = {
'Project-Id-Version': "%s %s" % (release.description, release.version),
'Report-Msgid-Bugs-To': '',
'POT-Creation-Date': now,
'PO-Revision-Date': now,
'Last-Translator': '',
'Language-Team': '',
'MIME-Version': '1.0',
'Content-Type': 'text/plain; charset=UTF-8',
'Content-Transfer-Encoding': '',
'Plural-Forms': '',
}
# buffer expects bytes
self.buffer.write(str(self.po).encode())
def add_entry(self, modules, tnrs, source, trad, comments=None):
entry = polib.POEntry(
msgid=source,
msgstr=trad,
)
plural = len(modules) > 1 and 's' or ''
entry.comment = "module%s: %s" % (plural, ', '.join(modules))
if comments:
entry.comment += "\n" + "\n".join(comments)
code = False
for typy, name, res_id in tnrs:
if typy == 'code':
code = True
res_id = 0
if isinstance(res_id, int) or res_id.isdigit():
# second term of occurrence must be a digit
# occurrence line at 0 are discarded when rendered to string
entry.occurrences.append((u"%s:%s" % (typy, name), str(res_id)))
else:
entry.occurrences.append((u"%s:%s:%s" % (typy, name, res_id), ''))
if code:
entry.flags.append("python-format")
self.po.append(entry)
class TarFileWriter:
def __init__(self, target, lang):
self.tar = tarfile.open(fileobj=target, mode='w|gz')
self.lang = lang
def write_rows(self, rows):
rows_by_module = defaultdict(list)
for row in rows:
module = row[0]
rows_by_module[module].append(row)
for mod, modrows in rows_by_module.items():
with io.BytesIO() as buf:
po = PoFileWriter(buf, lang=self.lang)
po.write_rows(modrows)
buf.seek(0)
info = tarfile.TarInfo(
join(mod, 'i18n', '{basename}.{ext}'.format(
basename=self.lang or mod,
ext='po' if self.lang else 'pot',
)))
# addfile will read <size> bytes from the buffer so
# size *must* be set first
info.size = len(buf.getvalue())
self.tar.addfile(info, fileobj=buf)
self.tar.close()
# Methods to export the translation file
def trans_export(lang, modules, buffer, format, cr):
reader = TranslationModuleReader(cr, modules=modules, lang=lang)
writer = TranslationFileWriter(buffer, fileformat=format, lang=lang)
writer.write_rows(reader)
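# Usage sketch (hypothetical helper): export the translation template of one
# installed module to an in-memory buffer; assumes an open cursor `cr` on an
# initialized database. Passing lang=None exports source terms only.
def _example_export_pot(cr, module='base'):
    buf = io.BytesIO()
    trans_export(None, [module], buf, 'po', cr)
    return buf.getvalue()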
def trans_parse_rml(de):
res = []
for n in de:
for m in n:
if isinstance(m, SKIPPED_ELEMENT_TYPES) or not m.text:
continue
            string_list = [s.replace('\n', ' ').strip() for s in re.split(r'\[\[.+?\]\]', m.text)]
for s in string_list:
if s:
res.append(s.encode("utf8"))
res.extend(trans_parse_rml(n))
return res
def _push(callback, term, source_line):
""" Sanity check before pushing translation terms """
term = (term or "").strip()
# Avoid non-char tokens like ':' '...' '.00' etc.
if len(term) > 8 or any(x.isalpha() for x in term):
callback(term, source_line)
# tests whether an object is in a list of modules
def in_modules(object_name, modules):
if 'all' in modules:
return True
module_dict = {
'ir': 'base',
'res': 'base',
}
module = object_name.split('.')[0]
module = module_dict.get(module, module)
return module in modules
def _extract_translatable_qweb_terms(element, callback):
""" Helper method to walk an etree document representing
    a QWeb template, and call ``callback(term, source_line)`` for each
translatable term that is found in the document.
:param etree._Element element: root of etree document to extract terms from
:param Callable callback: a callable in the form ``f(term, source_line)``,
that will be called for each extracted term.
"""
# not using elementTree.iterparse because we need to skip sub-trees in case
# the ancestor element had a reason to be skipped
for el in element:
if isinstance(el, SKIPPED_ELEMENT_TYPES): continue
if (el.tag.lower() not in SKIPPED_ELEMENTS
and "t-js" not in el.attrib
and not ("t-jquery" in el.attrib and "t-operation" not in el.attrib)
and el.get("t-translation", '').strip() != "off"):
_push(callback, el.text, el.sourceline)
# Do not export terms contained on the Component directive of OWL
# attributes in this context are most of the time variables,
# not real HTML attributes.
# Node tags starting with a capital letter are considered OWL Components
# and a widespread convention and good practice for DOM tags is to write
# them all lower case.
# https://www.w3schools.com/html/html5_syntax.asp
# https://github.com/odoo/owl/blob/master/doc/reference/component.md#composition
if not el.tag[0].isupper() and 't-component' not in el.attrib:
for att in ('title', 'alt', 'label', 'placeholder', 'aria-label'):
if att in el.attrib:
_push(callback, el.attrib[att], el.sourceline)
_extract_translatable_qweb_terms(el, callback)
_push(callback, el.tail, el.sourceline)
def babel_extract_qweb(fileobj, keywords, comment_tags, options):
"""Babel message extractor for qweb template files.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should
be recognized as translation functions
:param comment_tags: a list of translator tags to search for and
include in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)``
tuples
:rtype: Iterable
"""
result = []
def handle_text(text, lineno):
result.append((lineno, None, text, []))
tree = etree.parse(fileobj)
_extract_translatable_qweb_terms(tree.getroot(), handle_text)
return result
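# Usage sketch: run the extractor on an in-memory QWeb template; the keywords
# and comment_tags arguments are unused by this extractor and may be left empty.
def _example_extract_qweb_terms():
    template = b'<templates><t t-name="demo"><span title="A tooltip">Hello</span></t></templates>'
    return babel_extract_qweb(io.BytesIO(template), [], [], {})
    # -> entries like (lineno, None, 'Hello', []) and (lineno, None, 'A tooltip', [])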
ImdInfo = namedtuple('ExternalId', ['name', 'model', 'res_id', 'module'])
class TranslationModuleReader:
""" Retrieve translated records per module
:param cr: cursor to database to export
:param modules: list of modules to filter the exported terms, can be ['all']
records with no external id are always ignored
:param lang: language code to retrieve the translations
retrieve source terms only if not set
"""
def __init__(self, cr, modules=None, lang=None):
self._cr = cr
self._modules = modules or ['all']
self._lang = lang
self.env = odoo.api.Environment(cr, odoo.SUPERUSER_ID, {})
self._to_translate = []
self._path_list = [(path, True) for path in odoo.addons.__path__]
self._installed_modules = [
m['name']
for m in self.env['ir.module.module'].search_read([('state', '=', 'installed')], fields=['name'])
]
self._export_translatable_records()
self._export_translatable_resources()
def __iter__(self):
""" Export ir.translation values for all retrieved records """
IrTranslation = self.env['ir.translation']
for module, source, name, res_id, ttype, comments, record_id in self._to_translate:
trans = (
                IrTranslation._get_source(name if ttype != "code" else None, ttype, self._lang, source, res_id=record_id)
if self._lang
else ""
)
yield (module, ttype, name, res_id, source, encode(trans) or '', comments)
def _push_translation(self, module, ttype, name, res_id, source, comments=None, record_id=None):
""" Insert a translation that will be used in the file generation
        In a PO file this will create an entry of the form
#: <ttype>:<name>:<res_id>
#, <comment>
msgid "<source>"
record_id is the database id of the record being translated
"""
# empty and one-letter terms are ignored, they probably are not meant to be
# translated, and would be very hard to translate anyway.
sanitized_term = (source or '').strip()
# remove non-alphanumeric chars
sanitized_term = re.sub(r'\W+', '', sanitized_term)
if not sanitized_term or len(sanitized_term) <= 1:
return
self._to_translate.append((module, source, name, res_id, ttype, tuple(comments or ()), record_id))
def _get_translatable_records(self, imd_records):
""" Filter the records that are translatable
A record is considered as untranslatable if:
- it does not exist
- the model is flagged with _translate=False
- it is a field of a model flagged with _translate=False
- it is a selection of a field of a model flagged with _translate=False
        :param imd_records: a list of ImdInfo namedtuples belonging to the same model
"""
model = next(iter(imd_records)).model
if model not in self.env:
_logger.error("Unable to find object %r", model)
return self.env["_unknown"].browse()
if not self.env[model]._translate:
return self.env[model].browse()
res_ids = [r.res_id for r in imd_records]
records = self.env[model].browse(res_ids).exists()
if len(records) < len(res_ids):
missing_ids = set(res_ids) - set(records.ids)
missing_records = [f"{r.module}.{r.name}" for r in imd_records if r.res_id in missing_ids]
_logger.warning("Unable to find records of type %r with external ids %s", model, ', '.join(missing_records))
if not records:
return records
if model == 'ir.model.fields.selection':
fields = defaultdict(list)
for selection in records:
fields[selection.field_id] = selection
for field, selection in fields.items():
field_name = field.name
field_model = self.env.get(field.model)
if (field_model is None or not field_model._translate or
field_name not in field_model._fields):
# the selection is linked to a model with _translate=False, remove it
records -= selection
elif model == 'ir.model.fields':
for field in records:
field_name = field.name
field_model = self.env.get(field.model)
if (field_model is None or not field_model._translate or
field_name not in field_model._fields):
# the field is linked to a model with _translate=False, remove it
records -= field
return records
def _export_translatable_records(self):
""" Export translations of all translated records having an external id """
query = """SELECT min(name), model, res_id, module
FROM ir_model_data
WHERE module = ANY(%s)
GROUP BY model, res_id, module
ORDER BY module, model, min(name)"""
if 'all' not in self._modules:
query_param = list(self._modules)
else:
query_param = self._installed_modules
self._cr.execute(query, (query_param,))
records_per_model = defaultdict(dict)
for (xml_name, model, res_id, module) in self._cr.fetchall():
records_per_model[model][res_id] = ImdInfo(xml_name, model, res_id, module)
for model, imd_per_id in records_per_model.items():
records = self._get_translatable_records(imd_per_id.values())
if not records:
continue
for record in records:
module = imd_per_id[record.id].module
xml_name = "%s.%s" % (module, imd_per_id[record.id].name)
for field_name, field in record._fields.items():
if field.translate:
name = model + "," + field_name
try:
value = record[field_name] or ''
except Exception:
continue
for term in set(field.get_trans_terms(value)):
trans_type = 'model_terms' if callable(field.translate) else 'model'
self._push_translation(module, trans_type, name, xml_name, term, record_id=record.id)
def _get_module_from_path(self, path):
for (mp, rec) in self._path_list:
mp = os.path.join(mp, '')
dirname = os.path.join(os.path.dirname(path), '')
if rec and path.startswith(mp) and dirname != mp:
path = path[len(mp):]
return path.split(os.path.sep)[0]
return 'base' # files that are not in a module are considered as being in 'base' module
def _verified_module_filepaths(self, fname, path, root):
fabsolutepath = join(root, fname)
frelativepath = fabsolutepath[len(path):]
display_path = "addons%s" % frelativepath
module = self._get_module_from_path(fabsolutepath)
if ('all' in self._modules or module in self._modules) and module in self._installed_modules:
if os.path.sep != '/':
display_path = display_path.replace(os.path.sep, '/')
return module, fabsolutepath, frelativepath, display_path
return None, None, None, None
def _babel_extract_terms(self, fname, path, root, extract_method="python", trans_type='code',
extra_comments=None, extract_keywords={'_': None}):
module, fabsolutepath, _, display_path = self._verified_module_filepaths(fname, path, root)
if not module:
return
extra_comments = extra_comments or []
src_file = open(fabsolutepath, 'rb')
options = {}
if extract_method == 'python':
options['encoding'] = 'UTF-8'
try:
for extracted in extract.extract(extract_method, src_file, keywords=extract_keywords, options=options):
# Babel 0.9.6 yields lineno, message, comments
# Babel 1.3 yields lineno, message, comments, context
lineno, message, comments = extracted[:3]
self._push_translation(module, trans_type, display_path, lineno,
encode(message), comments + extra_comments)
except Exception:
_logger.exception("Failed to extract terms from %s", fabsolutepath)
finally:
src_file.close()
def _export_translatable_resources(self):
""" Export translations for static terms
This will include:
- the python strings marked with _() or _lt()
- the javascript strings marked with _t() or _lt() inside static/src/js/
- the strings inside Qweb files inside static/src/xml/
"""
# Also scan these non-addon paths
for bin_path in ['osv', 'report', 'modules', 'service', 'tools']:
self._path_list.append((os.path.join(config['root_path'], bin_path), True))
# non-recursive scan for individual files in root directory but without
# scanning subdirectories that may contain addons
self._path_list.append((config['root_path'], False))
_logger.debug("Scanning modules at paths: %s", self._path_list)
for (path, recursive) in self._path_list:
_logger.debug("Scanning files of modules at %s", path)
for root, dummy, files in os.walk(path, followlinks=True):
for fname in fnmatch.filter(files, '*.py'):
self._babel_extract_terms(fname, path, root,
extract_keywords={'_': None, '_lt': None})
if fnmatch.fnmatch(root, '*/static/src*'):
# Javascript source files
for fname in fnmatch.filter(files, '*.js'):
self._babel_extract_terms(fname, path, root, 'javascript',
extra_comments=[WEB_TRANSLATION_COMMENT],
extract_keywords={'_t': None, '_lt': None})
# QWeb template files
for fname in fnmatch.filter(files, '*.xml'):
self._babel_extract_terms(fname, path, root, 'odoo.tools.translate:babel_extract_qweb',
extra_comments=[WEB_TRANSLATION_COMMENT])
if not recursive:
# due to topdown, first iteration is in first level
break
def trans_load(cr, filename, lang, verbose=True, create_empty_translation=False, overwrite=False):
try:
with file_open(filename, mode='rb') as fileobj:
_logger.info("loading %s", filename)
fileformat = os.path.splitext(filename)[-1][1:].lower()
return trans_load_data(cr, fileobj, fileformat, lang,
verbose=verbose,
create_empty_translation=create_empty_translation,
overwrite=overwrite)
except IOError:
if verbose:
_logger.error("couldn't read translation file %s", filename)
return None
def trans_load_data(cr, fileobj, fileformat, lang,
verbose=True, create_empty_translation=False, overwrite=False):
"""Populates the ir_translation table.
:param fileobj: buffer open to a translation file
    :param fileformat: format of the `fileobj` file, one of 'po' or 'csv'
:param lang: language code of the translations contained in `fileobj`
language must be present and activated in the database
:param verbose: increase log output
:param create_empty_translation: create an ir.translation record, even if no value
is provided in the translation entry
:param overwrite: if an ir.translation already exists for a term, replace it with
the one in `fileobj`
"""
if verbose:
_logger.info('loading translation file for language %s', lang)
env = odoo.api.Environment(cr, odoo.SUPERUSER_ID, {})
try:
if not env['res.lang']._lang_get(lang):
_logger.error("Couldn't read translation for lang '%s', language not found", lang)
return None
# now, the serious things: we read the language file
fileobj.seek(0)
reader = TranslationFileReader(fileobj, fileformat=fileformat)
        # read the rest of the file with a cursor-like object to insert translations quickly
Translation = env['ir.translation']
irt_cursor = Translation._get_import_cursor(overwrite)
def process_row(row):
"""Process a single PO (or POT) entry."""
# dictionary which holds values for this line of the csv file
# {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
# 'src': ..., 'value': ..., 'module':...}
dic = dict.fromkeys(('type', 'name', 'res_id', 'src', 'value',
'comments', 'imd_model', 'imd_name', 'module'))
dic['lang'] = lang
dic.update(row)
# do not import empty values
if not create_empty_translation and not dic['value']:
return
irt_cursor.push(dic)
# First process the entries from the PO file (doing so also fills/removes
# the entries from the POT file).
for row in reader:
process_row(row)
irt_cursor.finish()
Translation.clear_caches()
if verbose:
_logger.info("translation file loaded successfully")
except IOError:
iso_lang = get_iso_codes(lang)
filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
_logger.exception("couldn't read translation file %s", filename)
def get_locales(lang=None):
if lang is None:
lang = locale.getdefaultlocale()[0]
if os.name == 'nt':
lang = _LOCALE2WIN32.get(lang, lang)
def process(enc):
ln = locale._build_localename((lang, enc))
yield ln
nln = locale.normalize(ln)
if nln != ln:
yield nln
for x in process('utf8'): yield x
prefenc = locale.getpreferredencoding()
if prefenc:
for x in process(prefenc): yield x
prefenc = {
'latin1': 'latin9',
'iso-8859-1': 'iso8859-15',
'cp1252': '1252',
}.get(prefenc.lower())
if prefenc:
for x in process(prefenc): yield x
yield lang
def resetlocale():
# locale.resetlocale is bugged with some locales.
for ln in get_locales():
try:
return locale.setlocale(locale.LC_ALL, ln)
except locale.Error:
continue
def load_language(cr, lang):
""" Loads a translation terms for a language.
Used mainly to automate language loading at db initialization.
:param lang: language ISO code with optional _underscore_ and l10n flavor (ex: 'fr', 'fr_BE', but not 'fr-BE')
:type lang: str
"""
env = odoo.api.Environment(cr, odoo.SUPERUSER_ID, {})
installer = env['base.language.install'].create({'lang': lang})
installer.lang_install()
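# Usage sketch: activating a language from code; the database name below is
# illustrative only and must refer to an initialized database.
def _example_install_language(db_name='mydb', lang='fr_BE'):
    registry = odoo.registry(db_name)
    with registry.cursor() as cr:
        load_language(cr, lang)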
| 39.292512 | 50,373 |
13,851 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from __future__ import print_function
import builtins
import math
def round(f):
# P3's builtin round differs from P2 in the following manner:
# * it rounds half to even rather than up (away from 0)
# * round(-0.) loses the sign (it returns -0 rather than 0)
# * round(x) returns an int rather than a float
#
# this compatibility shim implements Python 2's round in terms of
# Python 3's so that important rounding error under P3 can be
# trivially fixed, assuming the P2 behaviour to be debugged and
# correct.
roundf = builtins.round(f)
if builtins.round(f + 1) - roundf != 1:
return f + math.copysign(0.5, f)
# copysign ensures round(-0.) -> -0 *and* result is a float
return math.copysign(roundf, f)
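# Behaviour sketch of the shim above, contrasted with Python 3's built-in
# banker's rounding (plain assertions, safe to execute):
def _example_round_shim():
    assert builtins.round(2.5) == 2                    # ties-to-even
    assert round(2.5) == 3.0 and round(-2.5) == -3.0   # ties away from zero, float result
    assert math.copysign(1.0, round(-0.25)) == -1.0    # round(-0.) keeps the negative sign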
def _float_check_precision(precision_digits=None, precision_rounding=None):
assert (precision_digits is not None or precision_rounding is not None) and \
not (precision_digits and precision_rounding),\
"exactly one of precision_digits and precision_rounding must be specified"
assert precision_rounding is None or precision_rounding > 0,\
"precision_rounding must be positive, got %s" % precision_rounding
if precision_digits is not None:
return 10 ** -precision_digits
return precision_rounding
def float_round(value, precision_digits=None, precision_rounding=None, rounding_method='HALF-UP'):
"""Return ``value`` rounded to ``precision_digits`` decimal digits,
minimizing IEEE-754 floating point representation errors, and applying
the tie-breaking rule selected with ``rounding_method``, by default
HALF-UP (away from zero).
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
:param float value: the value to round
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
    :param rounding_method: the rounding method used: 'HALF-UP', 'UP' or 'DOWN'.
        HALF-UP rounds to the closest number, with ties (fractional part >= 0.5)
        going away from zero; UP always rounds away from zero; DOWN always
        rounds towards zero.
:return: rounded float
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
if rounding_factor == 0 or value == 0:
return 0.0
# NORMALIZE - ROUND - DENORMALIZE
# In order to easily support rounding to arbitrary 'steps' (e.g. coin values),
# we normalize the value before rounding it as an integer, and de-normalize
# after rounding: e.g. float_round(1.3, precision_rounding=.5) == 1.5
    # Due to IEEE 754 float/double representation limits, the approximation of the
# real value may be slightly below the tie limit, resulting in an error of
# 1 unit in the last place (ulp) after rounding.
# For example 2.675 == 2.6749999999999998.
    # To correct this, we add a very small epsilon value, scaled to the
    # order of magnitude of the value, to tip the tie-break in the right
# direction.
# Credit: discussion with OpenERP community members on bug 882036
normalized_value = value / rounding_factor # normalize
sign = math.copysign(1.0, normalized_value)
epsilon_magnitude = math.log(abs(normalized_value), 2)
epsilon = 2**(epsilon_magnitude-52)
# TIE-BREAKING: UP/DOWN (for ceiling[resp. flooring] operations)
# When rounding the value up[resp. down], we instead subtract[resp. add] the epsilon value
# as the approximation of the real value may be slightly *above* the
# tie limit, this would result in incorrectly rounding up[resp. down] to the next number
# The math.ceil[resp. math.floor] operation is applied on the absolute value in order to
# round "away from zero" and not "towards infinity", then the sign is
# restored.
if rounding_method == 'UP':
normalized_value -= sign*epsilon
rounded_value = math.ceil(abs(normalized_value)) * sign
elif rounding_method == 'DOWN':
normalized_value += sign*epsilon
rounded_value = math.floor(abs(normalized_value)) * sign
# TIE-BREAKING: HALF-UP (for normal rounding)
# We want to apply HALF-UP tie-breaking rules, i.e. 0.5 rounds away from 0.
else:
normalized_value += math.copysign(epsilon, normalized_value)
rounded_value = round(normalized_value) # round to integer
result = rounded_value * rounding_factor # de-normalize
return result
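# Worked examples for float_round (values follow the comments above; plain
# assertions, safe to execute):
def _example_float_round():
    assert float_round(2.675, precision_digits=2) == 2.68     # epsilon fixes 2.6749999...
    assert float_round(-2.675, precision_digits=2) == -2.68   # HALF-UP is symmetric around 0
    assert float_round(1.3, precision_rounding=0.5) == 1.5    # rounding to arbitrary steps
    assert float_round(7.0, precision_rounding=5.0, rounding_method='DOWN') == 5.0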
def float_is_zero(value, precision_digits=None, precision_rounding=None):
"""Returns true if ``value`` is small enough to be treated as
zero at the given precision (smaller than the corresponding *epsilon*).
The precision (``10**-precision_digits`` or ``precision_rounding``)
is used as the zero *epsilon*: values less than that are considered
to be zero.
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value: value to compare with the precision's zero
:return: True if ``value`` is considered zero
"""
epsilon = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
return abs(float_round(value, precision_rounding=epsilon)) < epsilon
def float_compare(value1, value2, precision_digits=None, precision_rounding=None):
"""Compare ``value1`` and ``value2`` after rounding them according to the
given precision. A value is considered lower/greater than another value
if their rounded value is different. This is not the same as having a
non-zero difference!
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Example: 1.432 and 1.431 are equal at 2 digits precision,
so this method would return 0
However 0.006 and 0.002 are considered different (this method returns 1)
because they respectively round to 0.01 and 0.0, even though
0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value1: first value to compare
:param float value2: second value to compare
:return: (resp.) -1, 0 or 1, if ``value1`` is (resp.) lower than,
equal to, or greater than ``value2``, at the given precision.
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
value1 = float_round(value1, precision_rounding=rounding_factor)
value2 = float_round(value2, precision_rounding=rounding_factor)
delta = value1 - value2
if float_is_zero(delta, precision_rounding=rounding_factor): return 0
return -1 if delta < 0.0 else 1
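# Worked examples matching the docstrings of float_is_zero and float_compare
# (plain assertions, safe to execute):
def _example_float_compare():
    assert float_compare(1.432, 1.431, precision_digits=2) == 0   # both round to 1.43
    assert float_compare(0.006, 0.002, precision_digits=2) == 1   # 0.01 vs 0.0 after rounding
    assert float_is_zero(0.006 - 0.002, precision_digits=2)       # the difference rounds to 0.0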
def float_repr(value, precision_digits):
"""Returns a string representation of a float with the
       given number of fractional digits. This should not be
used to perform a rounding operation (this is done via
:meth:`~.float_round`), but only to produce a suitable
string representation for a float.
:param int precision_digits: number of fractional digits to
include in the output
"""
# Can't use str() here because it seems to have an intrinsic
# rounding to 12 significant digits, which causes a loss of
# precision. e.g. str(123456789.1234) == str(123456789.123)!!
return ("%%.%sf" % precision_digits) % value
_float_repr = float_repr
def float_split_str(value, precision_digits):
"""Splits the given float 'value' in its unitary and decimal parts,
returning each of them as a string, rounding the value using
the provided ``precision_digits`` argument.
The length of the string returned for decimal places will always
be equal to ``precision_digits``, adding zeros at the end if needed.
In case ``precision_digits`` is zero, an empty string is returned for
the decimal places.
Examples:
1.432 with precision 2 => ('1', '43')
1.49 with precision 1 => ('1', '5')
1.1 with precision 3 => ('1', '100')
1.12 with precision 0 => ('1', '')
:param float value: value to split.
:param int precision_digits: number of fractional digits to round to.
:return: returns the tuple(<unitary part>, <decimal part>) of the given value
:rtype: tuple(str, str)
"""
value = float_round(value, precision_digits=precision_digits)
value_repr = float_repr(value, precision_digits)
return tuple(value_repr.split('.')) if precision_digits else (value_repr, '')
def float_split(value, precision_digits):
""" same as float_split_str() except that it returns the unitary and decimal
parts as integers instead of strings. In case ``precision_digits`` is zero,
0 is always returned as decimal part.
:rtype: tuple(int, int)
"""
units, cents = float_split_str(value, precision_digits)
if not cents:
return int(units), 0
return int(units), int(cents)
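# Usage sketch: the integer variant mirrors the float_split_str examples above.
def _example_float_split():
    assert float_split(1.49, 1) == (1, 5)
    assert float_split(1.1, 3) == (1, 100)
    assert float_split(1.12, 0) == (1, 0)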
def json_float_round(value, precision_digits, rounding_method='HALF-UP'):
"""Not suitable for float calculations! Similar to float_repr except that it
returns a float suitable for json dump
This may be necessary to produce "exact" representations of rounded float
values during serialization, such as what is done by `json.dumps()`.
Unfortunately `json.dumps` does not allow any form of custom float representation,
nor any custom types, everything is serialized from the basic JSON types.
:param int precision_digits: number of fractional digits to round to.
    :param rounding_method: the rounding method used: 'HALF-UP', 'UP' or 'DOWN'.
        HALF-UP rounds to the closest number, with ties (fractional part >= 0.5)
        going away from zero; UP always rounds away from zero; DOWN always
        rounds towards zero.
:return: a rounded float value that must not be used for calculations, but
is ready to be serialized in JSON with minimal chances of
representation errors.
"""
rounded_value = float_round(value, precision_digits=precision_digits, rounding_method=rounding_method)
rounded_repr = float_repr(rounded_value, precision_digits=precision_digits)
# As of Python 3.1, rounded_repr should be the shortest representation for our
# rounded float, so we create a new float whose repr is expected
# to be the same value, or a value that is semantically identical
# and will be used in the json serialization.
# e.g. if rounded_repr is '3.1750', the new float repr could be 3.175
# but not 3.174999999999322452.
# Cfr. bpo-1580: https://bugs.python.org/issue1580
return float(rounded_repr)
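# Usage sketch: producing a JSON-friendly rounded amount (the payload shape is
# illustrative only).
def _example_json_amount():
    import json
    return json.dumps({'total': json_float_round(1.0 / 3.0, 2)})  # '{"total": 0.33}'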
if __name__ == "__main__":
import time
start = time.time()
count = 0
errors = 0
def try_round(amount, expected, precision_digits=3):
global count, errors; count += 1
result = float_repr(float_round(amount, precision_digits=precision_digits),
precision_digits=precision_digits)
if result != expected:
errors += 1
print('###!!! Rounding error: got %s , expected %s' % (result, expected))
# Extended float range test, inspired by Cloves Almeida's test on bug #882036.
fractions = [.0, .015, .01499, .675, .67499, .4555, .4555, .45555]
expecteds = ['.00', '.02', '.01', '.68', '.67', '.46', '.456', '.4556']
precisions = [2, 2, 2, 2, 2, 2, 3, 4]
for magnitude in range(7):
for frac, exp, prec in zip(fractions, expecteds, precisions):
for sign in [-1,1]:
for x in range(0, 10000, 97):
n = x * 10**magnitude
f = sign * (n + frac)
f_exp = ('-' if f != 0 and sign == -1 else '') + str(n) + exp
try_round(f, f_exp, precision_digits=prec)
stop = time.time()
# Micro-bench results:
# 47130 round calls in 0.422306060791 secs, with Python 2.6.7 on Core i3 x64
# with decimal:
# 47130 round calls in 6.612248100021 secs, with Python 2.6.7 on Core i3 x64
print(count, " round calls, ", errors, "errors, done in ", (stop-start), 'secs')
| 49.117021 | 13,851 |
1,714 |
py
|
PYTHON
|
15.0
|
import ast
import os
import logging
from shutil import copyfileobj
_logger = logging.getLogger(__name__)
from werkzeug.datastructures import FileStorage
try:
from xlrd import xlsx
except ImportError:
pass
else:
from lxml import etree
# xlrd.xlsx supports defusedxml, defusedxml's etree interface is broken
# (missing ElementTree and thus ElementTree.iter) which causes a fallback to
# Element.getiterator(), triggering a warning before 3.9 and an error from 3.9.
#
# We have defusedxml installed because zeep has a hard dep on defused and
# doesn't want to drop it (mvantellingen/python-zeep#1014).
#
# Ignore the check and set the relevant flags directly using lxml as we have a
# hard dependency on it.
xlsx.ET = etree
xlsx.ET_has_iterparse = True
xlsx.Element_has_iter = True
FileStorage.save = lambda self, dst, buffer_size=1<<20: copyfileobj(self.stream, dst, buffer_size)
orig_literal_eval = ast.literal_eval
def literal_eval(expr):
# limit the size of the expression to avoid segmentation faults
# the default limit is set to 100KiB
    # can be overridden by setting the ODOO_LIMIT_LITEVAL_BUFFER environment variable
buffer_size = 102400
buffer_size_env = os.getenv("ODOO_LIMIT_LITEVAL_BUFFER")
if buffer_size_env:
if buffer_size_env.isdigit():
buffer_size = int(buffer_size_env)
else:
_logger.error("ODOO_LIMIT_LITEVAL_BUFFER has to be an integer, defaulting to 100KiB")
if isinstance(expr, str) and len(expr) > buffer_size:
raise ValueError("expression can't exceed buffer limit")
return orig_literal_eval(expr)
ast.literal_eval = literal_eval
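# Behaviour sketch of the patched ast.literal_eval: small expressions behave as
# usual, while oversized ones (default cap 100KiB, see ODOO_LIMIT_LITEVAL_BUFFER
# above) are rejected. The ~200KiB payload below is illustrative only.
def _example_literal_eval_limit():
    assert ast.literal_eval("[1, 2, 3]") == [1, 2, 3]
    try:
        ast.literal_eval("[" + "1," * 100000 + "]")
    except ValueError:
        return True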
| 32.961538 | 1,714 |
40,237 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import configparser as ConfigParser
import errno
import logging
import optparse
import glob
import os
import sys
import tempfile
import warnings
import odoo
from os.path import expandvars, expanduser, abspath, realpath
from .. import release, conf, loglevels
from . import appdirs
from passlib.context import CryptContext
crypt_context = CryptContext(schemes=['pbkdf2_sha512', 'plaintext'],
deprecated=['plaintext'])
class MyOption (optparse.Option, object):
""" optparse Option with two additional attributes.
The list of command line options (getopt.Option) is used to create the
list of the configuration file options. When reading the file, and then
reading the command line arguments, we don't want optparse.parse results
to override the configuration file values. But if we provide default
values to optparse, optparse will return them and we can't know if they
were really provided by the user or not. A solution is to not use
optparse's default attribute, but use a custom one (that will be copied
to create the default values of the configuration file).
"""
def __init__(self, *opts, **attrs):
self.my_default = attrs.pop('my_default', None)
super(MyOption, self).__init__(*opts, **attrs)
DEFAULT_LOG_HANDLER = ':INFO'
def _get_default_datadir():
home = os.path.expanduser('~')
if os.path.isdir(home):
func = appdirs.user_data_dir
else:
if sys.platform in ['win32', 'darwin']:
func = appdirs.site_data_dir
else:
func = lambda **kwarg: "/var/lib/%s" % kwarg['appname'].lower()
# No "version" kwarg as session and filestore paths are shared against series
return func(appname=release.product_name, appauthor=release.author)
def _deduplicate_loggers(loggers):
""" Avoid saving multiple logging levels for the same loggers to a save
file, that just takes space and the list can potentially grow unbounded
if for some odd reason people use :option`--save`` all the time.
"""
# dict(iterable) -> the last item of iterable for any given key wins,
# which is what we want and expect. Output order should not matter as
# there are no duplicates within the output sequence
return (
'{}:{}'.format(logger, level)
for logger, level in dict(it.split(':') for it in loggers).items()
)
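# Behaviour sketch: for a repeated prefix only the last level wins, so a config
# file written by --save never accumulates duplicate handler entries.
def _example_deduplicate_loggers():
    merged = list(_deduplicate_loggers([':INFO', 'odoo.sql_db:DEBUG', 'odoo.sql_db:WARNING']))
    assert merged == [':INFO', 'odoo.sql_db:WARNING']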
class configmanager(object):
def __init__(self, fname=None):
"""Constructor.
:param fname: a shortcut allowing to instantiate :class:`configmanager`
from Python code without resorting to environment
variable
"""
# Options not exposed on the command line. Command line options will be added
# from optparse's parser.
self.options = {
'admin_passwd': 'admin',
'csv_internal_sep': ',',
'publisher_warranty_url': 'http://services.openerp.com/publisher-warranty/',
'reportgz': False,
'root_path': None,
}
# Not exposed in the configuration file.
self.blacklist_for_save = set([
'publisher_warranty_url', 'load_language', 'root_path',
'init', 'save', 'config', 'update', 'stop_after_init', 'dev_mode', 'shell_interface'
])
# dictionary mapping option destination (keys in self.options) to MyOptions.
self.casts = {}
self.misc = {}
self.config_file = fname
self._LOGLEVELS = dict([
(getattr(loglevels, 'LOG_%s' % x), getattr(logging, x))
for x in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET')
])
version = "%s %s" % (release.description, release.version)
self.parser = parser = optparse.OptionParser(version=version, option_class=MyOption)
# Server startup config
group = optparse.OptionGroup(parser, "Common options")
group.add_option("-c", "--config", dest="config", help="specify alternate config file")
group.add_option("-s", "--save", action="store_true", dest="save", default=False,
help="save configuration to ~/.odoorc (or to ~/.openerp_serverrc if it exists)")
group.add_option("-i", "--init", dest="init", help="install one or more modules (comma-separated list, use \"all\" for all modules), requires -d")
group.add_option("-u", "--update", dest="update",
help="update one or more modules (comma-separated list, use \"all\" for all modules). Requires -d.")
group.add_option("--without-demo", dest="without_demo",
help="disable loading demo data for modules to be installed (comma-separated, use \"all\" for all modules). Requires -d and -i. Default is %default",
my_default=False)
group.add_option("-P", "--import-partial", dest="import_partial", my_default='',
help="Use this for big data importation, if it crashes you will be able to continue at the current state. Provide a filename to store intermediate importation states.")
group.add_option("--pidfile", dest="pidfile", help="file where the server pid will be stored")
group.add_option("--addons-path", dest="addons_path",
help="specify additional addons paths (separated by commas).",
action="callback", callback=self._check_addons_path, nargs=1, type="string")
group.add_option("--upgrade-path", dest="upgrade_path",
help="specify an additional upgrade path.",
action="callback", callback=self._check_upgrade_path, nargs=1, type="string")
group.add_option("--load", dest="server_wide_modules", help="Comma-separated list of server-wide modules.", my_default='base,web')
group.add_option("-D", "--data-dir", dest="data_dir", my_default=_get_default_datadir(),
help="Directory where to store Odoo data")
parser.add_option_group(group)
# HTTP
group = optparse.OptionGroup(parser, "HTTP Service Configuration")
group.add_option("--http-interface", dest="http_interface", my_default='',
help="Listen interface address for HTTP services. "
"Keep empty to listen on all interfaces (0.0.0.0)")
group.add_option("-p", "--http-port", dest="http_port", my_default=8069,
help="Listen port for the main HTTP service", type="int", metavar="PORT")
group.add_option("--longpolling-port", dest="longpolling_port", my_default=8072,
help="Listen port for the longpolling HTTP service", type="int", metavar="PORT")
group.add_option("--no-http", dest="http_enable", action="store_false", my_default=True,
help="Disable the HTTP and Longpolling services entirely")
group.add_option("--proxy-mode", dest="proxy_mode", action="store_true", my_default=False,
help="Activate reverse proxy WSGI wrappers (headers rewriting) "
"Only enable this when running behind a trusted web proxy!")
# HTTP: hidden backwards-compatibility for "*xmlrpc*" options
hidden = optparse.SUPPRESS_HELP
group.add_option("--xmlrpc-interface", dest="http_interface", help=hidden)
group.add_option("--xmlrpc-port", dest="http_port", type="int", help=hidden)
group.add_option("--no-xmlrpc", dest="http_enable", action="store_false", help=hidden)
parser.add_option_group(group)
# WEB
group = optparse.OptionGroup(parser, "Web interface Configuration")
group.add_option("--db-filter", dest="dbfilter", my_default='', metavar="REGEXP",
help="Regular expressions for filtering available databases for Web UI. "
"The expression can use %d (domain) and %h (host) placeholders.")
parser.add_option_group(group)
# Testing Group
group = optparse.OptionGroup(parser, "Testing Configuration")
group.add_option("--test-file", dest="test_file", my_default=False,
help="Launch a python test file.")
group.add_option("--test-enable", action="callback", callback=self._test_enable_callback,
dest='test_enable',
help="Enable unit tests.")
group.add_option("--test-tags", dest="test_tags",
help="Comma-separated list of specs to filter which tests to execute. Enable unit tests if set. "
"A filter spec has the format: [-][tag][/module][:class][.method] "
"The '-' specifies if we want to include or exclude tests matching this spec. "
"The tag will match tags added on a class with a @tagged decorator "
"(all Test classes have 'standard' and 'at_install' tags "
"until explicitly removed, see the decorator documentation). "
"'*' will match all tags. "
"If tag is omitted on include mode, its value is 'standard'. "
"If tag is omitted on exclude mode, its value is '*'. "
"The module, class, and method will respectively match the module name, test class name and test method name. "
"Example: --test-tags :TestClass.test_func,/test_module,external "
"Filtering and executing the tests happens twice: right "
"after each module installation/update and at the end "
"of the modules loading. At each stage tests are filtered "
"by --test-tags specs and additionally by dynamic specs "
"'at_install' and 'post_install' correspondingly.")
group.add_option("--screencasts", dest="screencasts", action="store", my_default=None,
metavar='DIR',
help="Screencasts will go in DIR/{db_name}/screencasts.")
temp_tests_dir = os.path.join(tempfile.gettempdir(), 'odoo_tests')
group.add_option("--screenshots", dest="screenshots", action="store", my_default=temp_tests_dir,
metavar='DIR',
help="Screenshots will go in DIR/{db_name}/screenshots. Defaults to %s." % temp_tests_dir)
parser.add_option_group(group)
# Logging Group
group = optparse.OptionGroup(parser, "Logging Configuration")
group.add_option("--logfile", dest="logfile", help="file where the server log will be stored")
group.add_option("--syslog", action="store_true", dest="syslog", my_default=False, help="Send the log to the syslog server")
group.add_option('--log-handler', action="append", default=[], my_default=DEFAULT_LOG_HANDLER, metavar="PREFIX:LEVEL", help='setup a handler at LEVEL for a given PREFIX. An empty PREFIX indicates the root logger. This option can be repeated. Example: "odoo.orm:DEBUG" or "werkzeug:CRITICAL" (default: ":INFO")')
group.add_option('--log-request', action="append_const", dest="log_handler", const="odoo.http.rpc.request:DEBUG", help='shortcut for --log-handler=odoo.http.rpc.request:DEBUG')
group.add_option('--log-response', action="append_const", dest="log_handler", const="odoo.http.rpc.response:DEBUG", help='shortcut for --log-handler=odoo.http.rpc.response:DEBUG')
group.add_option('--log-web', action="append_const", dest="log_handler", const="odoo.http:DEBUG", help='shortcut for --log-handler=odoo.http:DEBUG')
group.add_option('--log-sql', action="append_const", dest="log_handler", const="odoo.sql_db:DEBUG", help='shortcut for --log-handler=odoo.sql_db:DEBUG')
group.add_option('--log-db', dest='log_db', help="Logging database", my_default=False)
group.add_option('--log-db-level', dest='log_db_level', my_default='warning', help="Logging database level")
# For backward-compatibility, map the old log levels to something
# quite close.
levels = [
'info', 'debug_rpc', 'warn', 'test', 'critical', 'runbot',
'debug_sql', 'error', 'debug', 'debug_rpc_answer', 'notset'
]
group.add_option('--log-level', dest='log_level', type='choice',
choices=levels, my_default='info',
help='specify the level of the logging. Accepted values: %s.' % (levels,))
parser.add_option_group(group)
# SMTP Group
group = optparse.OptionGroup(parser, "SMTP Configuration")
group.add_option('--email-from', dest='email_from', my_default=False,
help='specify the SMTP email address for sending email')
group.add_option('--from-filter', dest='from_filter', my_default=False,
help='specify for which email address the SMTP configuration can be used')
group.add_option('--smtp', dest='smtp_server', my_default='localhost',
help='specify the SMTP server for sending email')
group.add_option('--smtp-port', dest='smtp_port', my_default=25,
help='specify the SMTP port', type="int")
group.add_option('--smtp-ssl', dest='smtp_ssl', action='store_true', my_default=False,
help='if passed, SMTP connections will be encrypted with SSL (STARTTLS)')
group.add_option('--smtp-user', dest='smtp_user', my_default=False,
help='specify the SMTP username for sending email')
group.add_option('--smtp-password', dest='smtp_password', my_default=False,
help='specify the SMTP password for sending email')
group.add_option('--smtp-ssl-certificate-filename', dest='smtp_ssl_certificate_filename', my_default=False,
help='specify the SSL certificate used for authentication')
group.add_option('--smtp-ssl-private-key-filename', dest='smtp_ssl_private_key_filename', my_default=False,
help='specify the SSL private key used for authentication')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Database related options")
group.add_option("-d", "--database", dest="db_name", my_default=False,
help="specify the database name")
group.add_option("-r", "--db_user", dest="db_user", my_default=False,
help="specify the database user name")
group.add_option("-w", "--db_password", dest="db_password", my_default=False,
help="specify the database password")
group.add_option("--pg_path", dest="pg_path", help="specify the pg executable path")
group.add_option("--db_host", dest="db_host", my_default=False,
help="specify the database host")
group.add_option("--db_port", dest="db_port", my_default=False,
help="specify the database port", type="int")
group.add_option("--db_sslmode", dest="db_sslmode", type="choice", my_default='prefer',
choices=['disable', 'allow', 'prefer', 'require', 'verify-ca', 'verify-full'],
help="specify the database ssl connection mode (see PostgreSQL documentation)")
group.add_option("--db_maxconn", dest="db_maxconn", type='int', my_default=64,
help="specify the maximum number of physical connections to PostgreSQL")
group.add_option("--db-template", dest="db_template", my_default="template0",
help="specify a custom database template to create a new database")
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Internationalisation options",
"Use these options to translate Odoo to another language. "
"See i18n section of the user manual. Option '-d' is mandatory. "
"Option '-l' is mandatory in case of importation"
)
group.add_option('--load-language', dest="load_language",
help="specifies the languages for the translations you want to be loaded")
group.add_option('-l', "--language", dest="language",
help="specify the language of the translation file. Use it with --i18n-export or --i18n-import")
group.add_option("--i18n-export", dest="translate_out",
help="export all sentences to be translated to a CSV file, a PO file or a TGZ archive and exit")
group.add_option("--i18n-import", dest="translate_in",
help="import a CSV or a PO file with translations and exit. The '-l' option is required.")
group.add_option("--i18n-overwrite", dest="overwrite_existing_translations", action="store_true", my_default=False,
help="overwrites existing translation terms on updating a module or importing a CSV or a PO file.")
group.add_option("--modules", dest="translate_modules",
help="specify modules to export. Use in combination with --i18n-export")
parser.add_option_group(group)
security = optparse.OptionGroup(parser, 'Security-related options')
security.add_option('--no-database-list', action="store_false", dest='list_db', my_default=True,
help="Disable the ability to obtain or view the list of databases. "
"Also disable access to the database manager and selector, "
"so be sure to set a proper --database parameter first")
parser.add_option_group(security)
# Advanced options
group = optparse.OptionGroup(parser, "Advanced options")
group.add_option('--dev', dest='dev_mode', type="string",
help="Enable developer mode. Param: List of options separated by comma. "
"Options : all, [pudb|wdb|ipdb|pdb], reload, qweb, werkzeug, xml")
group.add_option('--shell-interface', dest='shell_interface', type="string",
help="Specify a preferred REPL to use in shell mode. Supported REPLs are: "
"[ipython|ptpython|bpython|python]")
group.add_option("--stop-after-init", action="store_true", dest="stop_after_init", my_default=False,
help="stop the server after its initialization")
group.add_option("--osv-memory-count-limit", dest="osv_memory_count_limit", my_default=False,
help="Force a limit on the maximum number of records kept in the virtual "
"osv_memory tables. By default there is no limit.",
type="int")
group.add_option("--transient-age-limit", dest="transient_age_limit", my_default=1.0,
help="Time limit (decimal value in hours) records created with a "
"TransientModel (mostly wizard) are kept in the database. Default to 1 hour.",
type="float")
group.add_option("--osv-memory-age-limit", dest="osv_memory_age_limit", my_default=False,
help="Deprecated alias to the transient-age-limit option",
type="float")
group.add_option("--max-cron-threads", dest="max_cron_threads", my_default=2,
help="Maximum number of threads processing concurrently cron jobs (default 2).",
type="int")
group.add_option("--unaccent", dest="unaccent", my_default=False, action="store_true",
help="Try to enable the unaccent extension when creating new databases.")
group.add_option("--geoip-db", dest="geoip_database", my_default='/usr/share/GeoIP/GeoLite2-City.mmdb',
help="Absolute path to the GeoIP database file.")
parser.add_option_group(group)
if os.name == 'posix':
group = optparse.OptionGroup(parser, "Multiprocessing options")
# TODO sensible default for the three following limits.
group.add_option("--workers", dest="workers", my_default=0,
help="Specify the number of workers, 0 disable prefork mode.",
type="int")
group.add_option("--limit-memory-soft", dest="limit_memory_soft", my_default=2048 * 1024 * 1024,
help="Maximum allowed virtual memory per worker (in bytes), when reached the worker be "
"reset after the current request (default 2048MiB).",
type="int")
group.add_option("--limit-memory-hard", dest="limit_memory_hard", my_default=2560 * 1024 * 1024,
help="Maximum allowed virtual memory per worker (in bytes), when reached, any memory "
"allocation will fail (default 2560MiB).",
type="int")
group.add_option("--limit-time-cpu", dest="limit_time_cpu", my_default=60,
help="Maximum allowed CPU time per request (default 60).",
type="int")
group.add_option("--limit-time-real", dest="limit_time_real", my_default=120,
help="Maximum allowed Real time per request (default 120).",
type="int")
group.add_option("--limit-time-real-cron", dest="limit_time_real_cron", my_default=-1,
help="Maximum allowed Real time per cron job. (default: --limit-time-real). "
"Set to 0 for no limit. ",
type="int")
group.add_option("--limit-request", dest="limit_request", my_default=8192,
help="Maximum number of request to be processed per worker (default 8192).",
type="int")
parser.add_option_group(group)
# Copy all optparse options (i.e. MyOption) into self.options.
for group in parser.option_groups:
for option in group.option_list:
if option.dest not in self.options:
self.options[option.dest] = option.my_default
self.casts[option.dest] = option
# generate default config
self._parse_config()
def parse_config(self, args=None):
""" Parse the configuration file (if any) and the command-line
arguments.
This method initializes odoo.tools.config and openerp.conf (the
former should be removed in the future) with library-wide
configuration values.
This method must be called before proper usage of this library can be
made.
Typical usage of this method:
odoo.tools.config.parse_config(sys.argv[1:])
"""
opt = self._parse_config(args)
odoo.netsvc.init_logger()
self._warn_deprecated_options()
odoo.modules.module.initialize_sys_path()
return opt
def _parse_config(self, args=None):
if args is None:
args = []
opt, args = self.parser.parse_args(args)
def die(cond, msg):
if cond:
self.parser.error(msg)
# Ensures no illegitimate argument is silently discarded (avoids insidious "hyphen to dash" problem)
die(args, "unrecognized parameters: '%s'" % " ".join(args))
die(bool(opt.syslog) and bool(opt.logfile),
"the syslog and logfile options are exclusive")
die(opt.translate_in and (not opt.language or not opt.db_name),
"the i18n-import option cannot be used without the language (-l) and the database (-d) options")
die(opt.overwrite_existing_translations and not (opt.translate_in or opt.update),
"the i18n-overwrite option cannot be used without the i18n-import option or without the update option")
die(opt.translate_out and (not opt.db_name),
"the i18n-export option cannot be used without the database (-d) option")
# Check if the config file exists (-c used, but not -s)
die(not opt.save and opt.config and not os.access(opt.config, os.R_OK),
"The config file '%s' selected with -c/--config doesn't exist or is not readable, "\
"use -s/--save if you want to generate it"% opt.config)
        die(bool(opt.osv_memory_age_limit) and bool(opt.transient_age_limit),
            "the osv-memory-age-limit option cannot be used with the "
            "transient-age-limit option, please only use the latter.")
# place/search the config file on Win32 near the server installation
# (../etc from the server)
        # if the server is run by an unprivileged user, they have to specify the location of a config file they can write to,
        # otherwise they won't be able to save the configuration, or even start the server...
# TODO use appdirs
if os.name == 'nt':
rcfilepath = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), 'odoo.conf')
else:
rcfilepath = os.path.expanduser('~/.odoorc')
old_rcfilepath = os.path.expanduser('~/.openerp_serverrc')
die(os.path.isfile(rcfilepath) and os.path.isfile(old_rcfilepath),
"Found '.odoorc' and '.openerp_serverrc' in your path. Please keep only one of "\
"them, preferably '.odoorc'.")
if not os.path.isfile(rcfilepath) and os.path.isfile(old_rcfilepath):
rcfilepath = old_rcfilepath
self.rcfile = os.path.abspath(
self.config_file or opt.config or os.environ.get('ODOO_RC') or os.environ.get('OPENERP_SERVER') or rcfilepath)
self.load()
# Verify that we want to log or not, if not the output will go to stdout
if self.options['logfile'] in ('None', 'False'):
self.options['logfile'] = False
# the same for the pidfile
if self.options['pidfile'] in ('None', 'False'):
self.options['pidfile'] = False
# the same for the test_tags
if self.options['test_tags'] == 'None':
self.options['test_tags'] = None
# and the server_wide_modules
if self.options['server_wide_modules'] in ('', 'None', 'False'):
self.options['server_wide_modules'] = 'base,web'
# if defined do not take the configfile value even if the defined value is None
keys = ['http_interface', 'http_port', 'longpolling_port', 'http_enable',
'db_name', 'db_user', 'db_password', 'db_host', 'db_sslmode',
'db_port', 'db_template', 'logfile', 'pidfile', 'smtp_port',
'email_from', 'smtp_server', 'smtp_user', 'smtp_password', 'from_filter',
'smtp_ssl_certificate_filename', 'smtp_ssl_private_key_filename',
'db_maxconn', 'import_partial', 'addons_path', 'upgrade_path',
'syslog', 'without_demo', 'screencasts', 'screenshots',
'dbfilter', 'log_level', 'log_db',
'log_db_level', 'geoip_database', 'dev_mode', 'shell_interface'
]
for arg in keys:
# Copy the command-line argument (except the special case for log_handler, due to
# action=append requiring a real default, so we cannot use the my_default workaround)
if getattr(opt, arg, None) is not None:
self.options[arg] = getattr(opt, arg)
# ... or keep, but cast, the config file value.
elif isinstance(self.options[arg], str) and self.casts[arg].type in optparse.Option.TYPE_CHECKER:
self.options[arg] = optparse.Option.TYPE_CHECKER[self.casts[arg].type](self.casts[arg], arg, self.options[arg])
if isinstance(self.options['log_handler'], str):
self.options['log_handler'] = self.options['log_handler'].split(',')
self.options['log_handler'].extend(opt.log_handler)
# if defined but None take the configfile value
keys = [
'language', 'translate_out', 'translate_in', 'overwrite_existing_translations',
'dev_mode', 'shell_interface', 'smtp_ssl', 'load_language',
'stop_after_init', 'without_demo', 'http_enable', 'syslog',
'list_db', 'proxy_mode',
'test_file', 'test_tags',
'osv_memory_count_limit', 'osv_memory_age_limit', 'transient_age_limit', 'max_cron_threads', 'unaccent',
'data_dir',
'server_wide_modules',
]
posix_keys = [
'workers',
'limit_memory_hard', 'limit_memory_soft',
'limit_time_cpu', 'limit_time_real', 'limit_request', 'limit_time_real_cron'
]
if os.name == 'posix':
keys += posix_keys
else:
self.options.update(dict.fromkeys(posix_keys, None))
# Copy the command-line arguments...
for arg in keys:
if getattr(opt, arg) is not None:
self.options[arg] = getattr(opt, arg)
# ... or keep, but cast, the config file value.
elif isinstance(self.options[arg], str) and self.casts[arg].type in optparse.Option.TYPE_CHECKER:
self.options[arg] = optparse.Option.TYPE_CHECKER[self.casts[arg].type](self.casts[arg], arg, self.options[arg])
self.options['root_path'] = self._normalize(os.path.join(os.path.dirname(__file__), '..'))
if not self.options['addons_path'] or self.options['addons_path']=='None':
default_addons = []
base_addons = os.path.join(self.options['root_path'], 'addons')
if os.path.exists(base_addons):
default_addons.append(base_addons)
main_addons = os.path.abspath(os.path.join(self.options['root_path'], '../addons'))
if os.path.exists(main_addons):
default_addons.append(main_addons)
self.options['addons_path'] = ','.join(default_addons)
else:
self.options['addons_path'] = ",".join(
self._normalize(x)
for x in self.options['addons_path'].split(','))
self.options["upgrade_path"] = (
",".join(self._normalize(x)
for x in self.options['upgrade_path'].split(','))
if self.options['upgrade_path']
else ""
)
self.options['init'] = opt.init and dict.fromkeys(opt.init.split(','), 1) or {}
self.options['demo'] = (dict(self.options['init'])
if not self.options['without_demo'] else {})
self.options['update'] = opt.update and dict.fromkeys(opt.update.split(','), 1) or {}
self.options['translate_modules'] = opt.translate_modules and [m.strip() for m in opt.translate_modules.split(',')] or ['all']
self.options['translate_modules'].sort()
dev_split = opt.dev_mode and [s.strip() for s in opt.dev_mode.split(',')] or []
self.options['dev_mode'] = 'all' in dev_split and dev_split + ['pdb', 'reload', 'qweb', 'werkzeug', 'xml'] or dev_split
if opt.pg_path:
self.options['pg_path'] = opt.pg_path
self.options['test_enable'] = bool(self.options['test_tags'])
if opt.save:
self.save()
# normalize path options
for key in ['data_dir', 'logfile', 'pidfile', 'test_file', 'screencasts', 'screenshots', 'pg_path', 'translate_out', 'translate_in', 'geoip_database']:
self.options[key] = self._normalize(self.options[key])
conf.addons_paths = self.options['addons_path'].split(',')
conf.server_wide_modules = [
m.strip() for m in self.options['server_wide_modules'].split(',') if m.strip()
]
return opt
def _warn_deprecated_options(self):
if self.options['osv_memory_age_limit']:
warnings.warn(
"The osv-memory-age-limit is a deprecated alias to "
"the transient-age-limit option, please use the latter.",
DeprecationWarning)
self.options['transient_age_limit'] = self.options.pop('osv_memory_age_limit')
def _is_addons_path(self, path):
from odoo.modules.module import MANIFEST_NAMES
for f in os.listdir(path):
modpath = os.path.join(path, f)
if os.path.isdir(modpath):
def hasfile(filename):
return os.path.isfile(os.path.join(modpath, filename))
if hasfile('__init__.py') and any(hasfile(mname) for mname in MANIFEST_NAMES):
return True
return False
def _check_addons_path(self, option, opt, value, parser):
ad_paths = []
for path in value.split(','):
path = path.strip()
res = os.path.abspath(os.path.expanduser(path))
if not os.path.isdir(res):
raise optparse.OptionValueError("option %s: no such directory: %r" % (opt, res))
if not self._is_addons_path(res):
raise optparse.OptionValueError("option %s: the path %r is not a valid addons directory" % (opt, path))
ad_paths.append(res)
setattr(parser.values, option.dest, ",".join(ad_paths))
def _check_upgrade_path(self, option, opt, value, parser):
upgrade_path = []
for path in value.split(','):
path = path.strip()
res = self._normalize(path)
if not os.path.isdir(res):
raise optparse.OptionValueError("option %s: no such directory: %r" % (opt, path))
if not self._is_upgrades_path(res):
raise optparse.OptionValueError("option %s: the path %r is not a valid upgrade directory" % (opt, path))
if res not in upgrade_path:
upgrade_path.append(res)
setattr(parser.values, option.dest, ",".join(upgrade_path))
def _is_upgrades_path(self, res):
return any(
glob.glob(os.path.join(res, f"*/*/{prefix}-*.py"))
for prefix in ["pre", "post", "end"]
)
def _test_enable_callback(self, option, opt, value, parser):
if not parser.values.test_tags:
parser.values.test_tags = "+standard"
def load(self):
outdated_options_map = {
'xmlrpc_port': 'http_port',
'xmlrpc_interface': 'http_interface',
'xmlrpc': 'http_enable',
}
p = ConfigParser.RawConfigParser()
try:
p.read([self.rcfile])
for (name,value) in p.items('options'):
name = outdated_options_map.get(name, name)
if value=='True' or value=='true':
value = True
if value=='False' or value=='false':
value = False
self.options[name] = value
#parse the other sections, as well
for sec in p.sections():
if sec == 'options':
continue
self.misc.setdefault(sec, {})
for (name, value) in p.items(sec):
if value=='True' or value=='true':
value = True
if value=='False' or value=='false':
value = False
self.misc[sec][name] = value
except IOError:
pass
except ConfigParser.NoSectionError:
pass
def save(self):
p = ConfigParser.RawConfigParser()
loglevelnames = dict(zip(self._LOGLEVELS.values(), self._LOGLEVELS))
p.add_section('options')
for opt in sorted(self.options):
if opt in ('version', 'language', 'translate_out', 'translate_in', 'overwrite_existing_translations', 'init', 'update'):
continue
if opt in self.blacklist_for_save:
continue
if opt in ('log_level',):
p.set('options', opt, loglevelnames.get(self.options[opt], self.options[opt]))
elif opt == 'log_handler':
p.set('options', opt, ','.join(_deduplicate_loggers(self.options[opt])))
else:
p.set('options', opt, self.options[opt])
for sec in sorted(self.misc):
p.add_section(sec)
for opt in sorted(self.misc[sec]):
p.set(sec,opt,self.misc[sec][opt])
# try to create the directories and write the file
try:
rc_exists = os.path.exists(self.rcfile)
if not rc_exists and not os.path.exists(os.path.dirname(self.rcfile)):
os.makedirs(os.path.dirname(self.rcfile))
try:
p.write(open(self.rcfile, 'w'))
if not rc_exists:
os.chmod(self.rcfile, 0o600)
except IOError:
sys.stderr.write("ERROR: couldn't write the config file\n")
except OSError:
# what to do if impossible?
sys.stderr.write("ERROR: couldn't create the config directory\n")
def get(self, key, default=None):
return self.options.get(key, default)
def pop(self, key, default=None):
return self.options.pop(key, default)
def get_misc(self, sect, key, default=None):
return self.misc.get(sect,{}).get(key, default)
def __setitem__(self, key, value):
self.options[key] = value
if key in self.options and isinstance(self.options[key], str) and \
key in self.casts and self.casts[key].type in optparse.Option.TYPE_CHECKER:
self.options[key] = optparse.Option.TYPE_CHECKER[self.casts[key].type](self.casts[key], key, self.options[key])
def __getitem__(self, key):
return self.options[key]
@property
def addons_data_dir(self):
add_dir = os.path.join(self['data_dir'], 'addons')
d = os.path.join(add_dir, release.series)
if not os.path.exists(d):
try:
# bootstrap parent dir +rwx
if not os.path.exists(add_dir):
os.makedirs(add_dir, 0o700)
# try to make +rx placeholder dir, will need manual +w to activate it
os.makedirs(d, 0o500)
except OSError:
logging.getLogger(__name__).debug('Failed to create addons data dir %s', d)
return d
@property
def session_dir(self):
d = os.path.join(self['data_dir'], 'sessions')
try:
os.makedirs(d, 0o700)
except OSError as e:
if e.errno != errno.EEXIST:
raise
assert os.access(d, os.W_OK), \
"%s: directory is not writable" % d
return d
def filestore(self, dbname):
return os.path.join(self['data_dir'], 'filestore', dbname)
def set_admin_password(self, new_password):
hash_password = crypt_context.hash if hasattr(crypt_context, 'hash') else crypt_context.encrypt
self.options['admin_passwd'] = hash_password(new_password)
def verify_admin_password(self, password):
"""Verifies the super-admin password, possibly updating the stored hash if needed"""
stored_hash = self.options['admin_passwd']
if not stored_hash:
# empty password/hash => authentication forbidden
return False
result, updated_hash = crypt_context.verify_and_update(password, stored_hash)
if result:
if updated_hash:
self.options['admin_passwd'] = updated_hash
return True
def _normalize(self, path):
if not path:
return ''
return realpath(abspath(expanduser(expandvars(path.strip()))))
config = configmanager()
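# Illustrative usage sketch (added for clarity; not part of the original
# module). Once the options have been parsed, ``config`` behaves like a
# dict; the option names below are taken from the keys handled above, and
# ``parse_config()`` is assumed to be the public parsing entry point of
# this class.
#
#     from odoo.tools import config
#     config.parse_config(['-c', '/etc/odoo/odoo.conf'])
#     workers = config.get('workers', 0)
#     addons = config['addons_path'].split(',')
#     extra = config.get_misc('my_section', 'my_key', default=None)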
| 54.154778 | 40,237 |
10,761 | py | PYTHON | 15.0 |
from lxml import etree
from lxml.builder import E
import copy
import itertools
import logging
from odoo.tools.translate import _
from odoo.tools import SKIPPED_ELEMENT_TYPES, html_escape
_logger = logging.getLogger(__name__)
def add_text_before(node, text):
""" Add text before ``node`` in its XML tree. """
if text is None:
return
prev = node.getprevious()
if prev is not None:
prev.tail = (prev.tail or "") + text
else:
parent = node.getparent()
parent.text = (parent.text or "") + text
def add_text_inside(node, text):
""" Add text inside ``node``. """
if text is None:
return
if len(node):
node[-1].tail = (node[-1].tail or "") + text
else:
node.text = (node.text or "") + text
def remove_element(node):
""" Remove ``node`` but not its tail, from its XML tree. """
add_text_before(node, node.tail)
node.tail = None
node.getparent().remove(node)
def locate_node(arch, spec):
""" Locate a node in a source (parent) architecture.
Given a complete source (parent) architecture (i.e. the field
`arch` in a view), and a 'spec' node (a node in an inheriting
view that specifies the location in the source view of what
should be changed), return (if it exists) the node in the
source view matching the specification.
:param arch: a parent architecture to modify
:param spec: a modifying node in an inheriting view
:return: a node in the source matching the spec
"""
if spec.tag == 'xpath':
expr = spec.get('expr')
try:
xPath = etree.ETXPath(expr)
except etree.XPathSyntaxError:
_logger.error("XPathSyntaxError while parsing xpath %r", expr)
raise
nodes = xPath(arch)
return nodes[0] if nodes else None
elif spec.tag == 'field':
# Only compare the field name: a field can be only once in a given view
# at a given level (and for multilevel expressions, we should use xpath
# inheritance spec anyway).
for node in arch.iter('field'):
if node.get('name') == spec.get('name'):
return node
return None
for node in arch.iter(spec.tag):
if isinstance(node, SKIPPED_ELEMENT_TYPES):
continue
if all(node.get(attr) == spec.get(attr) for attr in spec.attrib
if attr not in ('position', 'version')):
# Version spec should match parent's root element's version
if spec.get('version') and spec.get('version') != arch.get('version'):
return None
return node
return None
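# Illustrative sketch (added for clarity; not part of the original module):
# given the parent arch  <form><field name="partner_id"/></form>
# both of the following specs locate the same <field> node:
#   <xpath expr="//field[@name='partner_id']" position="after"/>
#   <field name="partner_id" position="after"/>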
def apply_inheritance_specs(source, specs_tree, inherit_branding=False, pre_locate=lambda s: True):
""" Apply an inheriting view (a descendant of the base view)
Apply to a source architecture all the spec nodes (i.e. nodes
describing where and what changes to apply to some parent
architecture) given by an inheriting view.
:param Element source: a parent architecture to modify
:param Element specs_tree: a modifying architecture in an inheriting view
:param bool inherit_branding:
:param pre_locate: function that is executed before locating a node.
This function receives an arch as argument.
This is required by studio to properly handle group_ids.
:return: a modified source where the specs are applied
:rtype: Element
"""
    # Queue of specification nodes (i.e. nodes describing where and what
    # changes to apply to some parent architecture).
specs = specs_tree if isinstance(specs_tree, list) else [specs_tree]
def extract(spec):
"""
Utility function that locates a node given a specification, remove
it from the source and returns it.
"""
if len(spec):
raise ValueError(
_("Invalid specification for moved nodes: %r", etree.tostring(spec, encoding='unicode'))
)
pre_locate(spec)
to_extract = locate_node(source, spec)
if to_extract is not None:
remove_element(to_extract)
return to_extract
else:
raise ValueError(
_("Element %r cannot be located in parent view", etree.tostring(spec, encoding='unicode'))
)
while len(specs):
spec = specs.pop(0)
if isinstance(spec, SKIPPED_ELEMENT_TYPES):
continue
if spec.tag == 'data':
specs += [c for c in spec]
continue
pre_locate(spec)
node = locate_node(source, spec)
if node is not None:
pos = spec.get('position', 'inside')
if pos == 'replace':
mode = spec.get('mode', 'outer')
if mode == "outer":
for loc in spec.xpath(".//*[text()='$0']"):
loc.text = ''
loc.append(copy.deepcopy(node))
if node.getparent() is None:
spec_content = None
comment = None
for content in spec:
if content.tag is not etree.Comment:
spec_content = content
break
else:
comment = content
source = copy.deepcopy(spec_content)
# only keep the t-name of a template root node
t_name = node.get('t-name')
if t_name:
source.set('t-name', t_name)
if comment is not None:
text = source.text
source.text = None
comment.tail = text
source.insert(0, comment)
else:
# TODO ideally the notion of 'inherit_branding' should
# not exist in this function. Given the current state of
# the code, it is however necessary to know where nodes
# were removed when distributing branding. As a stable
# fix, this solution was chosen: the location is marked
# with a "ProcessingInstruction" which will not impact
# the "Element" structure of the resulting tree.
# Exception: if we happen to replace a node that already
# has xpath branding (root level nodes), do not mark the
# location of the removal as it will mess up the branding
# of siblings elements coming from other views, after the
# branding is distributed (and those processing instructions
# removed).
if inherit_branding and not node.get('data-oe-xpath'):
node.addprevious(etree.ProcessingInstruction('apply-inheritance-specs-node-removal', node.tag))
for child in spec:
if child.get('position') == 'move':
child = extract(child)
node.addprevious(child)
node.getparent().remove(node)
elif mode == "inner":
# Replace the entire content of an element
for child in node:
node.remove(child)
node.text = None
for child in spec:
node.append(copy.deepcopy(child))
node.text = spec.text
else:
raise ValueError(_("Invalid mode attribute:") + " '%s'" % mode)
elif pos == 'attributes':
for child in spec.getiterator('attribute'):
attribute = child.get('name')
value = child.text or ''
if child.get('add') or child.get('remove'):
assert not child.text
separator = child.get('separator', ',')
if separator == ' ':
separator = None # squash spaces
to_add = (
s for s in (s.strip() for s in child.get('add', '').split(separator))
if s
)
to_remove = {s.strip() for s in child.get('remove', '').split(separator)}
values = (s.strip() for s in node.get(attribute, '').split(separator))
value = (separator or ' ').join(itertools.chain(
(v for v in values if v not in to_remove),
to_add
))
if value:
node.set(attribute, value)
elif attribute in node.attrib:
del node.attrib[attribute]
elif pos == 'inside':
add_text_inside(node, spec.text)
for child in spec:
if child.get('position') == 'move':
child = extract(child)
node.append(child)
elif pos == 'after':
# add a sentinel element right after node, insert content of
# spec before the sentinel, then remove the sentinel element
sentinel = E.sentinel()
node.addnext(sentinel)
add_text_before(sentinel, spec.text)
for child in spec:
if child.get('position') == 'move':
child = extract(child)
sentinel.addprevious(child)
remove_element(sentinel)
elif pos == 'before':
add_text_before(node, spec.text)
for child in spec:
if child.get('position') == 'move':
child = extract(child)
node.addprevious(child)
else:
raise ValueError(
_("Invalid position attribute: '%s'") %
pos
)
else:
attrs = ''.join([
' %s="%s"' % (attr, html_escape(spec.get(attr)))
for attr in spec.attrib
if attr != 'position'
])
tag = "<%s%s>" % (spec.tag, attrs)
raise ValueError(
_("Element '%s' cannot be located in parent view", tag)
)
return source
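if __name__ == '__main__':
    # Hedged, self-contained demo (added for illustration; not part of the
    # original module): insert a field right after an existing one.
    demo_source = etree.fromstring("<form><field name='name'/></form>")
    demo_spec = etree.fromstring(
        "<field name='name' position='after'><field name='email'/></field>"
    )
    demo_result = apply_inheritance_specs(demo_source, demo_spec)
    print(etree.tostring(demo_result, encoding='unicode'))
    # expected output: <form><field name="name"/><field name="email"/></form>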
| 41.709302 | 10,761 |
3,490 | py | PYTHON | 15.0 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
## this functions are taken from the setuptools package (version 0.6c8)
## http://peak.telecommunity.com/DevCenter/PkgResources#parsing-utilities
from __future__ import print_function
import re
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','_':'final-','rc':'c','dev':'@','saas':'','~':''}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part,part)
if not part or part=='.':
continue
if part[:1] in '0123456789':
yield part.zfill(8) # pad for numeric comparison
else:
yield '*'+part
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
"""Convert a version string to a chronologically-sortable key
This is a rough cross between distutils' StrictVersion and LooseVersion;
if you give it versions that would work with StrictVersion, then it behaves
the same; otherwise it acts like a slightly-smarter LooseVersion. It is
*possible* to create pathological version coding schemes that will fool
this parser, but they should be very rare in practice.
The returned value will be a tuple of strings. Numeric portions of the
version are padded to 8 digits so they will compare numerically, but
without relying on how numbers compare relative to strings. Dots are
dropped, but dashes are retained. Trailing zeros between alpha segments
or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
"2.4". Alphanumeric parts are lower-cased.
The algorithm assumes that strings like "-" and any alpha string that
alphabetically follows "final" represents a "patch level". So, "2.4-1"
is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
considered newer than "2.4-1", which in turn is newer than "2.4".
Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
come before "final" alphabetically) are assumed to be pre-release versions,
so that the version "2.4" is considered newer than "2.4a1".
Finally, to handle miscellaneous cases, the strings "pre", "preview", and
"rc" are treated as if they were "c", i.e. as though they were release
candidates, and therefore are not as new as a version string that does not
contain them.
"""
parts = []
for part in _parse_version_parts((s or '0.1').lower()):
if part.startswith('*'):
if part<'*final': # remove '-' before a prerelease tag
while parts and parts[-1]=='*final-': parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1]=='00000000':
parts.pop()
parts.append(part)
return tuple(parts)
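# Worked example (illustrative): a release candidate sorts before the final
# release because '*c' < '*final' in the resulting tuples:
#   parse_version('5.0.0-rc1') == ('00000005', '*c', '00000001', '*final')
#   parse_version('5.0.0')     == ('00000005', '*final')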
if __name__ == '__main__':
def chk(lst, verbose=False):
pvs = []
for v in lst:
pv = parse_version(v)
pvs.append(pv)
if verbose:
print(v, pv)
for a, b in zip(pvs, pvs[1:]):
assert a < b, '%s < %s == %s' % (a, b, a < b)
chk(('0', '4.2', '4.2.3.4', '5.0.0-alpha', '5.0.0-rc1', '5.0.0-rc1.1', '5.0.0_rc2', '5.0.0_rc3', '5.0.0'), False)
chk(('5.0.0-0_rc3', '5.0.0-1dev', '5.0.0-1'), False)
| 43.625 | 3,490 |
9,491 | py | PYTHON | 15.0 |
# -*- coding: utf-8 -*-
import math
import calendar
from datetime import date, datetime, time
import pytz
from dateutil.relativedelta import relativedelta
from . import ustr
from .func import lazy
def get_month(date):
''' Compute the month date range that the 'date' parameter belongs to.
:param date: A datetime.datetime or datetime.date object.
:return: A tuple (date_from, date_to) having the same object type as the 'date' parameter.
'''
date_from = type(date)(date.year, date.month, 1)
date_to = type(date)(date.year, date.month, calendar.monthrange(date.year, date.month)[1])
return date_from, date_to
def get_quarter_number(date):
''' Get the number of the quarter that the 'date' parameter belongs to.
:param date: A datetime.datetime or datetime.date object.
:return: A [1-4] integer.
'''
return math.ceil(date.month / 3)
def get_quarter(date):
''' Compute the quarter date range that the 'date' parameter belongs to.
:param date: A datetime.datetime or datetime.date object.
:return: A tuple (date_from, date_to) having the same object type as the 'date' parameter.
'''
quarter_number = get_quarter_number(date)
month_from = ((quarter_number - 1) * 3) + 1
date_from = type(date)(date.year, month_from, 1)
date_to = (date_from + relativedelta(months=2))
date_to = date_to.replace(day=calendar.monthrange(date_to.year, date_to.month)[1])
return date_from, date_to
def get_fiscal_year(date, day=31, month=12):
''' Compute the fiscal year date range that the 'date' parameter belongs to.
A fiscal year is the period used by governments for accounting purposes and varies between countries.
By default, calling this method with only one parameter gives the calendar year, because the ending date of the
fiscal year is set to YYYY-12-31.
:param date: A datetime.datetime or datetime.date object.
:param day: The day of month the fiscal year ends.
:param month: The month of year the fiscal year ends.
:return: A tuple (date_from, date_to) having the same object type as the 'date' parameter.
'''
max_day = calendar.monthrange(date.year, month)[1]
date_to = type(date)(date.year, month, min(day, max_day))
# Force at 29 February instead of 28 in case of leap year.
if date_to.month == 2 and date_to.day == 28 and max_day == 29:
date_to = type(date)(date.year, 2, 29)
if date <= date_to:
date_from = date_to - relativedelta(years=1)
max_day = calendar.monthrange(date_from.year, date_from.month)[1]
# Force at 29 February instead of 28 in case of leap year.
if date_from.month == 2 and date_from.day == 28 and max_day == 29:
date_from = type(date)(date_from.year, 2, 29)
date_from += relativedelta(days=1)
else:
date_from = date_to + relativedelta(days=1)
max_day = calendar.monthrange(date_to.year + 1, date_to.month)[1]
date_to = type(date)(date.year + 1, month, min(day, max_day))
# Force at 29 February instead of 28 in case of leap year.
if date_to.month == 2 and date_to.day == 28 and max_day == 29:
date_to += relativedelta(days=1)
return date_from, date_to
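# Worked examples (illustrative):
#   get_fiscal_year(date(2021, 5, 3))
#       -> (date(2021, 1, 1), date(2021, 12, 31))   # calendar year by default
#   get_fiscal_year(date(2021, 5, 3), day=31, month=3)
#       -> (date(2021, 4, 1), date(2022, 3, 31))    # fiscal year ending March 31st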
def get_timedelta(qty, granularity):
"""
Helper to get a `relativedelta` object for the given quantity and interval unit.
:param qty: the number of unit to apply on the timedelta to return
:param granularity: Type of period in string, can be year, quarter, month, week, day or hour.
"""
switch = {
'hour': relativedelta(hours=qty),
'day': relativedelta(days=qty),
'week': relativedelta(weeks=qty),
'month': relativedelta(months=qty),
'year': relativedelta(years=qty),
}
return switch[granularity]
def start_of(value, granularity):
"""
Get start of a time period from a date or a datetime.
:param value: initial date or datetime.
:param granularity: type of period in string, can be year, quarter, month, week, day or hour.
:return: a date/datetime object corresponding to the start of the specified period.
"""
is_datetime = isinstance(value, datetime)
if granularity == "year":
result = value.replace(month=1, day=1)
elif granularity == "quarter":
# Q1 = Jan 1st
# Q2 = Apr 1st
# Q3 = Jul 1st
# Q4 = Oct 1st
result = get_quarter(value)[0]
elif granularity == "month":
result = value.replace(day=1)
elif granularity == 'week':
# `calendar.weekday` uses ISO8601 for start of week reference, this means that
# by default MONDAY is the first day of the week and SUNDAY is the last.
result = value - relativedelta(days=calendar.weekday(value.year, value.month, value.day))
elif granularity == "day":
result = value
elif granularity == "hour" and is_datetime:
return datetime.combine(value, time.min).replace(hour=value.hour)
elif is_datetime:
raise ValueError(
"Granularity must be year, quarter, month, week, day or hour for value %s" % value
)
else:
raise ValueError(
"Granularity must be year, quarter, month, week or day for value %s" % value
)
return datetime.combine(result, time.min) if is_datetime else result
def end_of(value, granularity):
"""
Get end of a time period from a date or a datetime.
:param value: initial date or datetime.
:param granularity: Type of period in string, can be year, quarter, month, week, day or hour.
:return: A date/datetime object corresponding to the end of the specified period.
"""
is_datetime = isinstance(value, datetime)
if granularity == "year":
result = value.replace(month=12, day=31)
elif granularity == "quarter":
# Q1 = Mar 31st
# Q2 = Jun 30th
# Q3 = Sep 30th
# Q4 = Dec 31st
result = get_quarter(value)[1]
elif granularity == "month":
result = value + relativedelta(day=1, months=1, days=-1)
elif granularity == 'week':
# `calendar.weekday` uses ISO8601 for start of week reference, this means that
# by default MONDAY is the first day of the week and SUNDAY is the last.
result = value + relativedelta(days=6-calendar.weekday(value.year, value.month, value.day))
elif granularity == "day":
result = value
elif granularity == "hour" and is_datetime:
return datetime.combine(value, time.max).replace(hour=value.hour)
elif is_datetime:
raise ValueError(
"Granularity must be year, quarter, month, week, day or hour for value %s" % value
)
else:
raise ValueError(
"Granularity must be year, quarter, month, week or day for value %s" % value
)
return datetime.combine(result, time.max) if is_datetime else result
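# Worked examples (illustrative):
#   start_of(datetime(2021, 7, 14, 10, 30), 'week') -> datetime(2021, 7, 12, 0, 0)   # Monday
#   end_of(date(2021, 2, 10), 'month')              -> date(2021, 2, 28)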
def add(value, *args, **kwargs):
"""
Return the sum of ``value`` and a :class:`relativedelta`.
:param value: initial date or datetime.
:param args: positional args to pass directly to :class:`relativedelta`.
:param kwargs: keyword args to pass directly to :class:`relativedelta`.
:return: the resulting date/datetime.
"""
return value + relativedelta(*args, **kwargs)
def subtract(value, *args, **kwargs):
"""
Return the difference between ``value`` and a :class:`relativedelta`.
:param value: initial date or datetime.
:param args: positional args to pass directly to :class:`relativedelta`.
:param kwargs: keyword args to pass directly to :class:`relativedelta`.
:return: the resulting date/datetime.
"""
return value - relativedelta(*args, **kwargs)
def json_default(obj):
"""
Properly serializes date and datetime objects.
"""
from odoo import fields
if isinstance(obj, datetime):
return fields.Datetime.to_string(obj)
if isinstance(obj, date):
return fields.Date.to_string(obj)
if isinstance(obj, lazy):
return obj._value
return ustr(obj)
def date_range(start, end, step=relativedelta(months=1)):
"""Date range generator with a step interval.
:param start datetime: beginning date of the range.
:param end datetime: ending date of the range.
:param step relativedelta: interval of the range.
:return: a range of datetime from start to end.
:rtype: Iterator[datetime]
"""
are_naive = start.tzinfo is None and end.tzinfo is None
are_utc = start.tzinfo == pytz.utc and end.tzinfo == pytz.utc
# Cases with miscellaneous timezones are more complex because of DST.
are_others = start.tzinfo and end.tzinfo and not are_utc
if are_others:
if start.tzinfo.zone != end.tzinfo.zone:
raise ValueError("Timezones of start argument and end argument seem inconsistent")
if not are_naive and not are_utc and not are_others:
raise ValueError("Timezones of start argument and end argument mismatch")
if start > end:
raise ValueError("start > end, start date must be before end")
if start == start + step:
raise ValueError("Looks like step is null")
if start.tzinfo:
localize = start.tzinfo.localize
else:
localize = lambda dt: dt
dt = start.replace(tzinfo=None)
end = end.replace(tzinfo=None)
while dt <= end:
yield localize(dt)
dt = dt + step
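if __name__ == '__main__':
    # Hedged demo (added for illustration; not part of the original module):
    # enumerate the month starts of Q1 2021, both bounds included.
    for current in date_range(datetime(2021, 1, 1), datetime(2021, 4, 1)):
        print(current)
    # prints 2021-01-01, 2021-02-01, 2021-03-01 and 2021-04-01 (at 00:00:00)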
| 37.366142 | 9,491 |
31,656 | py | PYTHON | 15.0 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
__all__ = [
'convert_file', 'convert_sql_import',
'convert_csv_import', 'convert_xml_import'
]
import base64
import io
import logging
import os.path
import pprint
import re
import subprocess
import warnings
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import pytz
from lxml import etree, builder
try:
import jingtrang
except ImportError:
jingtrang = None
import odoo
from . import pycompat
from .config import config
from .misc import file_open, unquote, ustr, SKIPPED_ELEMENT_TYPES
from .translate import _
from odoo import SUPERUSER_ID, api
from odoo.exceptions import ValidationError
_logger = logging.getLogger(__name__)
from .safe_eval import safe_eval as s_eval, pytz, time
safe_eval = lambda expr, ctx={}: s_eval(expr, ctx, nocopy=True)
class ParseError(Exception):
...
class RecordDictWrapper(dict):
"""
Used to pass a record as locals in eval:
records do not strictly behave like dict, so we force them to.
"""
def __init__(self, record):
self.record = record
def __getitem__(self, key):
if key in self.record:
return self.record[key]
return dict.__getitem__(self, key)
def _get_idref(self, env, model_str, idref):
idref2 = dict(idref,
Command=odoo.fields.Command,
time=time,
DateTime=datetime,
datetime=datetime,
timedelta=timedelta,
relativedelta=relativedelta,
version=odoo.release.major_version,
ref=self.id_get,
pytz=pytz)
if model_str:
idref2['obj'] = env[model_str].browse
return idref2
def _fix_multiple_roots(node):
"""
Surround the children of the ``node`` element of an XML field with a
single root "data" element, to prevent having a document with multiple
roots once parsed separately.
XML nodes should have one root only, but we'd like to support
direct multiple roots in our partial documents (like inherited view architectures).
As a convention we'll surround multiple roots with a container "data" element, to be
ignored later when parsing.
"""
real_nodes = [x for x in node if not isinstance(x, SKIPPED_ELEMENT_TYPES)]
if len(real_nodes) > 1:
data_node = etree.Element("data")
for child in node:
data_node.append(child)
node.append(data_node)
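# Illustrative sketch (added for clarity; not part of the original module):
#   <field name="arch" type="xml"><p>a</p><p>b</p></field>
# has two root elements under <field>; after _fix_multiple_roots() they are
# wrapped in a single container that is ignored later when parsing:
#   <field name="arch" type="xml"><data><p>a</p><p>b</p></data></field>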
def _eval_xml(self, node, env):
if node.tag in ('field','value'):
t = node.get('type','char')
f_model = node.get('model')
if node.get('search'):
f_search = node.get("search")
f_use = node.get("use",'id')
f_name = node.get("name")
idref2 = {}
if f_search:
idref2 = _get_idref(self, env, f_model, self.idref)
q = safe_eval(f_search, idref2)
ids = env[f_model].search(q).ids
if f_use != 'id':
ids = [x[f_use] for x in env[f_model].browse(ids).read([f_use])]
_fields = env[f_model]._fields
if (f_name in _fields) and _fields[f_name].type == 'many2many':
return ids
f_val = False
if len(ids):
f_val = ids[0]
if isinstance(f_val, tuple):
f_val = f_val[0]
return f_val
a_eval = node.get('eval')
if a_eval:
idref2 = _get_idref(self, env, f_model, self.idref)
try:
return safe_eval(a_eval, idref2)
except Exception:
logging.getLogger('odoo.tools.convert.init').error(
'Could not eval(%s) for %s in %s', a_eval, node.get('name'), env.context)
raise
def _process(s):
matches = re.finditer(br'[^%]%\((.*?)\)[ds]'.decode('utf-8'), s)
done = set()
for m in matches:
found = m.group()[1:]
if found in done:
continue
done.add(found)
id = m.groups()[0]
if not id in self.idref:
self.idref[id] = self.id_get(id)
# So funny story: in Python 3, bytes(n: int) returns a
# bytestring of n nuls. In Python 2 it obviously returns the
# stringified number, which is what we're expecting here
s = s.replace(found, str(self.idref[id]))
s = s.replace('%%', '%') # Quite weird but it's for (somewhat) backward compatibility sake
return s
if t == 'xml':
_fix_multiple_roots(node)
return '<?xml version="1.0"?>\n'\
+_process("".join(etree.tostring(n, encoding='unicode') for n in node))
if t == 'html':
return _process("".join(etree.tostring(n, encoding='unicode') for n in node))
data = node.text
if node.get('file'):
with file_open(node.get('file'), 'rb', env=env) as f:
data = f.read()
if t == 'base64':
return base64.b64encode(data)
# after that, only text content makes sense
data = pycompat.to_text(data)
if t == 'file':
from ..modules import module
path = data.strip()
if not module.get_module_resource(self.module, path):
raise IOError("No such file or directory: '%s' in %s" % (
path, self.module))
return '%s,%s' % (self.module, path)
if t == 'char':
return data
if t == 'int':
d = data.strip()
if d == 'None':
return None
return int(d)
if t == 'float':
return float(data.strip())
if t in ('list','tuple'):
res=[]
for n in node.iterchildren(tag='value'):
res.append(_eval_xml(self, n, env))
if t=='tuple':
return tuple(res)
return res
elif node.tag == "function":
model_str = node.get('model')
model = env[model_str]
method_name = node.get('name')
# determine arguments
args = []
kwargs = {}
a_eval = node.get('eval')
if a_eval:
idref2 = _get_idref(self, env, model_str, self.idref)
args = list(safe_eval(a_eval, idref2))
for child in node:
if child.tag == 'value' and child.get('name'):
kwargs[child.get('name')] = _eval_xml(self, child, env)
else:
args.append(_eval_xml(self, child, env))
# merge current context with context in kwargs
kwargs['context'] = {**env.context, **kwargs.get('context', {})}
# invoke method
return odoo.api.call_kw(model, method_name, args, kwargs)
elif node.tag == "test":
return node.text
def str2bool(value):
return value.lower() not in ('0', 'false', 'off')
def nodeattr2bool(node, attr, default=False):
if not node.get(attr):
return default
val = node.get(attr).strip()
if not val:
return default
return str2bool(val)
class xml_import(object):
def get_env(self, node, eval_context=None):
uid = node.get('uid')
context = node.get('context')
if uid or context:
return self.env(
user=uid and self.id_get(uid),
context=context and {
**self.env.context,
**safe_eval(context, {
'ref': self.id_get,
**(eval_context or {})
})
}
)
return self.env
def make_xml_id(self, xml_id):
if not xml_id or '.' in xml_id:
return xml_id
return "%s.%s" % (self.module, xml_id)
def _test_xml_id(self, xml_id):
if '.' in xml_id:
module, id = xml_id.split('.', 1)
assert '.' not in id, """The ID reference "%s" must contain
at most one dot. Dotted references are used to refer to another module's
record, in the form: module.record_id""" % (xml_id,)
if module != self.module:
modcnt = self.env['ir.module.module'].search_count([('name', '=', module), ('state', '=', 'installed')])
assert modcnt == 1, """The ID "%s" refers to an uninstalled module""" % (xml_id,)
def _tag_delete(self, rec):
d_model = rec.get("model")
records = self.env[d_model]
d_search = rec.get("search")
if d_search:
idref = _get_idref(self, self.env, d_model, {})
try:
records = records.search(safe_eval(d_search, idref))
except ValueError:
_logger.warning('Skipping deletion for failed search `%r`', d_search, exc_info=True)
d_id = rec.get("id")
if d_id:
try:
records += records.browse(self.id_get(d_id))
except ValueError:
# d_id cannot be found. doesn't matter in this case
_logger.warning('Skipping deletion for missing XML ID `%r`', d_id, exc_info=True)
if records:
records.unlink()
def _tag_report(self, rec):
res = {}
for dest,f in (('name','string'),('model','model'),('report_name','name')):
res[dest] = rec.get(f)
assert res[dest], "Attribute %s of report is empty !" % (f,)
for field, dest in (('attachment', 'attachment'),
('attachment_use', 'attachment_use'),
('usage', 'usage'),
('file', 'report_file'),
('report_type', 'report_type'),
('parser', 'parser'),
('print_report_name', 'print_report_name'),
):
if rec.get(field):
res[dest] = rec.get(field)
if rec.get('auto'):
res['auto'] = safe_eval(rec.get('auto','False'))
if rec.get('header'):
res['header'] = safe_eval(rec.get('header','False'))
res['multi'] = rec.get('multi') and safe_eval(rec.get('multi','False'))
xml_id = rec.get('id','')
self._test_xml_id(xml_id)
warnings.warn(f"The <report> tag is deprecated, use a <record> tag for {xml_id!r}.", DeprecationWarning)
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(group[1:])
groups_value.append(odoo.Command.unlink(group_id))
else:
group_id = self.id_get(group)
groups_value.append(odoo.Command.link(group_id))
res['groups_id'] = groups_value
if rec.get('paperformat'):
pf_name = rec.get('paperformat')
pf_id = self.id_get(pf_name)
res['paperformat_id'] = pf_id
xid = self.make_xml_id(xml_id)
data = dict(xml_id=xid, values=res, noupdate=self.noupdate)
report = self.env['ir.actions.report']._load_records([data], self.mode == 'update')
self.idref[xml_id] = report.id
if not rec.get('menu') or safe_eval(rec.get('menu','False')):
report.create_action()
elif self.mode=='update' and safe_eval(rec.get('menu','False'))==False:
# Special check for report having attribute menu=False on update
report.unlink_action()
return report.id
def _tag_function(self, rec):
if self.noupdate and self.mode != 'init':
return
env = self.get_env(rec)
_eval_xml(self, rec, env)
def _tag_act_window(self, rec):
name = rec.get('name')
xml_id = rec.get('id','')
self._test_xml_id(xml_id)
warnings.warn(f"The <act_window> tag is deprecated, use a <record> for {xml_id!r}.", DeprecationWarning)
view_id = False
if rec.get('view_id'):
view_id = self.id_get(rec.get('view_id'))
domain = rec.get('domain') or '[]'
res_model = rec.get('res_model')
binding_model = rec.get('binding_model')
view_mode = rec.get('view_mode') or 'tree,form'
usage = rec.get('usage')
limit = rec.get('limit')
uid = self.env.user.id
# Act_window's 'domain' and 'context' contain mostly literals
# but they can also refer to the variables provided below
# in eval_context, so we need to eval() them before storing.
# Among the context variables, 'active_id' refers to
# the currently selected items in a list view, and only
# takes meaning at runtime on the client side. For this
# reason it must remain a bare variable in domain and context,
# even after eval() at server-side. We use the special 'unquote'
# class to achieve this effect: a string which has itself, unquoted,
# as representation.
active_id = unquote("active_id")
active_ids = unquote("active_ids")
active_model = unquote("active_model")
# Include all locals() in eval_context, for backwards compatibility
eval_context = {
'name': name,
'xml_id': xml_id,
'type': 'ir.actions.act_window',
'view_id': view_id,
'domain': domain,
'res_model': res_model,
'src_model': binding_model,
'view_mode': view_mode,
'usage': usage,
'limit': limit,
'uid': uid,
'active_id': active_id,
'active_ids': active_ids,
'active_model': active_model,
}
context = self.get_env(rec, eval_context).context
try:
domain = safe_eval(domain, eval_context)
except (ValueError, NameError):
# Some domains contain references that are only valid at runtime at
# client-side, so in that case we keep the original domain string
# as it is. We also log it, just in case.
_logger.debug('Domain value (%s) for element with id "%s" does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
domain, xml_id or 'n/a', exc_info=True)
res = {
'name': name,
'type': 'ir.actions.act_window',
'view_id': view_id,
'domain': domain,
'context': context,
'res_model': res_model,
'view_mode': view_mode,
'usage': usage,
'limit': limit,
}
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(group[1:])
groups_value.append(odoo.Command.unlink(group_id))
else:
group_id = self.id_get(group)
groups_value.append(odoo.Command.link(group_id))
res['groups_id'] = groups_value
if rec.get('target'):
res['target'] = rec.get('target','')
if binding_model:
res['binding_model_id'] = self.env['ir.model']._get(binding_model).id
res['binding_type'] = rec.get('binding_type') or 'action'
views = rec.get('binding_views')
if views is not None:
res['binding_view_types'] = views
xid = self.make_xml_id(xml_id)
data = dict(xml_id=xid, values=res, noupdate=self.noupdate)
self.env['ir.actions.act_window']._load_records([data], self.mode == 'update')
def _tag_menuitem(self, rec, parent=None):
rec_id = rec.attrib["id"]
self._test_xml_id(rec_id)
# The parent attribute was specified, if non-empty determine its ID, otherwise
# explicitly make a top-level menu
values = {
'parent_id': False,
'active': nodeattr2bool(rec, 'active', default=True),
}
if rec.get('sequence'):
values['sequence'] = int(rec.get('sequence'))
if parent is not None:
values['parent_id'] = parent
elif rec.get('parent'):
values['parent_id'] = self.id_get(rec.attrib['parent'])
elif rec.get('web_icon'):
values['web_icon'] = rec.attrib['web_icon']
if rec.get('name'):
values['name'] = rec.attrib['name']
if rec.get('action'):
a_action = rec.attrib['action']
if '.' not in a_action:
a_action = '%s.%s' % (self.module, a_action)
act = self.env.ref(a_action).sudo()
values['action'] = "%s,%d" % (act.type, act.id)
if not values.get('name') and act.type.endswith(('act_window', 'wizard', 'url', 'client', 'server')) and act.name:
values['name'] = act.name
if not values.get('name'):
values['name'] = rec_id or '?'
groups = []
for group in rec.get('groups', '').split(','):
if group.startswith('-'):
group_id = self.id_get(group[1:])
groups.append(odoo.Command.unlink(group_id))
elif group:
group_id = self.id_get(group)
groups.append(odoo.Command.link(group_id))
if groups:
values['groups_id'] = groups
data = {
'xml_id': self.make_xml_id(rec_id),
'values': values,
'noupdate': self.noupdate,
}
menu = self.env['ir.ui.menu']._load_records([data], self.mode == 'update')
for child in rec.iterchildren('menuitem'):
self._tag_menuitem(child, parent=menu.id)
def _tag_record(self, rec):
rec_model = rec.get("model")
env = self.get_env(rec)
rec_id = rec.get("id", '')
model = env[rec_model]
if self.xml_filename and rec_id:
model = model.with_context(
install_module=self.module,
install_filename=self.xml_filename,
install_xmlid=rec_id,
)
self._test_xml_id(rec_id)
xid = self.make_xml_id(rec_id)
# in update mode, the record won't be updated if the data node explicitly
# opt-out using @noupdate="1". A second check will be performed in
# model._load_records() using the record's ir.model.data `noupdate` field.
if self.noupdate and self.mode != 'init':
# check if the xml record has no id, skip
if not rec_id:
return None
record = env['ir.model.data']._load_xmlid(xid)
if record:
# if the resource already exists, don't update it but store
# its database id (can be useful)
self.idref[rec_id] = record.id
return None
elif not nodeattr2bool(rec, 'forcecreate', True):
# if it doesn't exist and we shouldn't create it, skip it
return None
# else create it normally
if xid and xid.partition('.')[0] != self.module:
# updating a record created by another module
record = self.env['ir.model.data']._load_xmlid(xid)
if not record:
if self.noupdate and not nodeattr2bool(rec, 'forcecreate', True):
# if it doesn't exist and we shouldn't create it, skip it
return None
raise Exception("Cannot update missing record %r" % xid)
res = {}
for field in rec.findall('./field'):
#TODO: most of this code is duplicated above (in _eval_xml)...
f_name = field.get("name")
f_ref = field.get("ref")
f_search = field.get("search")
f_model = field.get("model")
if not f_model and f_name in model._fields:
f_model = model._fields[f_name].comodel_name
f_use = field.get("use",'') or 'id'
f_val = False
if f_search:
idref2 = _get_idref(self, env, f_model, self.idref)
q = safe_eval(f_search, idref2)
assert f_model, 'Define an attribute model="..." in your .XML file !'
# browse the objects searched
s = env[f_model].search(q)
# column definitions of the "local" object
_fields = env[rec_model]._fields
# if the current field is many2many
if (f_name in _fields) and _fields[f_name].type == 'many2many':
f_val = [odoo.Command.set([x[f_use] for x in s])]
elif len(s):
# otherwise (we are probably in a many2one field),
# take the first element of the search
f_val = s[0][f_use]
elif f_ref:
if f_name in model._fields and model._fields[f_name].type == 'reference':
val = self.model_id_get(f_ref)
f_val = val[0] + ',' + str(val[1])
else:
f_val = self.id_get(f_ref, raise_if_not_found=nodeattr2bool(rec, 'forcecreate', True))
if not f_val:
_logger.warning("Skipping creation of %r because %s=%r could not be resolved", xid, f_name, f_ref)
return None
else:
f_val = _eval_xml(self, field, env)
if f_name in model._fields:
field_type = model._fields[f_name].type
if field_type == 'many2one':
f_val = int(f_val) if f_val else False
elif field_type == 'integer':
f_val = int(f_val)
elif field_type in ('float', 'monetary'):
f_val = float(f_val)
elif field_type == 'boolean' and isinstance(f_val, str):
f_val = str2bool(f_val)
res[f_name] = f_val
data = dict(xml_id=xid, values=res, noupdate=self.noupdate)
record = model._load_records([data], self.mode == 'update')
if rec_id:
self.idref[rec_id] = record.id
if config.get('import_partial'):
env.cr.commit()
return rec_model, record.id
def _tag_template(self, el):
# This helper transforms a <template> element into a <record> and forwards it
tpl_id = el.get('id', el.get('t-name'))
full_tpl_id = tpl_id
if '.' not in full_tpl_id:
full_tpl_id = '%s.%s' % (self.module, tpl_id)
# set the full template name for qweb <module>.<id>
if not el.get('inherit_id'):
el.set('t-name', full_tpl_id)
el.tag = 't'
else:
el.tag = 'data'
el.attrib.pop('id', None)
if self.module.startswith('theme_'):
model = 'theme.ir.ui.view'
else:
model = 'ir.ui.view'
record_attrs = {
'id': tpl_id,
'model': model,
}
for att in ['forcecreate', 'context']:
if att in el.attrib:
record_attrs[att] = el.attrib.pop(att)
Field = builder.E.field
name = el.get('name', tpl_id)
record = etree.Element('record', attrib=record_attrs)
record.append(Field(name, name='name'))
record.append(Field(full_tpl_id, name='key'))
record.append(Field("qweb", name='type'))
if 'track' in el.attrib:
record.append(Field(el.get('track'), name='track'))
if 'priority' in el.attrib:
record.append(Field(el.get('priority'), name='priority'))
if 'inherit_id' in el.attrib:
record.append(Field(name='inherit_id', ref=el.get('inherit_id')))
if 'website_id' in el.attrib:
record.append(Field(name='website_id', ref=el.get('website_id')))
if 'key' in el.attrib:
record.append(Field(el.get('key'), name='key'))
if el.get('active') in ("True", "False"):
view_id = self.id_get(tpl_id, raise_if_not_found=False)
if self.mode != "update" or not view_id:
record.append(Field(name='active', eval=el.get('active')))
if el.get('customize_show') in ("True", "False"):
record.append(Field(name='customize_show', eval=el.get('customize_show')))
groups = el.attrib.pop('groups', None)
if groups:
grp_lst = [("ref('%s')" % x) for x in groups.split(',')]
record.append(Field(name="groups_id", eval="[Command.set(["+', '.join(grp_lst)+"])]"))
if el.get('primary') == 'True':
# Pseudo clone mode, we'll set the t-name to the full canonical xmlid
el.append(
builder.E.xpath(
builder.E.attribute(full_tpl_id, name='t-name'),
expr=".",
position="attributes",
)
)
record.append(Field('primary', name='mode'))
# inject complete <template> element (after changing node name) into
# the ``arch`` field
record.append(Field(el, name="arch", type="xml"))
return self._tag_record(record)
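# Illustrative sketch (added for clarity; not part of the original module):
# a QWeb template declared as
#   <template id="assets_demo" inherit_id="web.assets_backend" name="demo">...</template>
# is rewritten into roughly the following record before being handed to
# _tag_record() above (ids and names here are hypothetical):
#   <record id="assets_demo" model="ir.ui.view">
#       <field name="name">demo</field>
#       <field name="key">module.assets_demo</field>
#       <field name="type">qweb</field>
#       <field name="inherit_id" ref="web.assets_backend"/>
#       <field name="arch" type="xml">...</field>
#   </record>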
def id_get(self, id_str, raise_if_not_found=True):
if id_str in self.idref:
return self.idref[id_str]
res = self.model_id_get(id_str, raise_if_not_found)
return res and res[1]
def model_id_get(self, id_str, raise_if_not_found=True):
if '.' not in id_str:
id_str = '%s.%s' % (self.module, id_str)
return self.env['ir.model.data']._xmlid_to_res_model_res_id(id_str, raise_if_not_found=raise_if_not_found)
def _tag_root(self, el):
for rec in el:
f = self._tags.get(rec.tag)
if f is None:
continue
self.envs.append(self.get_env(el))
self._noupdate.append(nodeattr2bool(el, 'noupdate', self.noupdate))
try:
f(rec)
except ParseError:
raise
except ValidationError as err:
msg = "while parsing {file}:{viewline}\n{err}\n\nView error context:\n{context}\n".format(
file=rec.getroottree().docinfo.URL,
viewline=rec.sourceline,
context=pprint.pformat(getattr(err, 'context', None) or '-no context-'),
err=err.args[0],
)
_logger.debug(msg, exc_info=True)
raise ParseError(msg) from None # Restart with "--log-handler odoo.tools.convert:DEBUG" for complete traceback
except Exception as e:
raise ParseError('while parsing %s:%s, somewhere inside\n%s' % (
rec.getroottree().docinfo.URL,
rec.sourceline,
etree.tostring(rec, encoding='unicode').rstrip()
)) from e
finally:
self._noupdate.pop()
self.envs.pop()
@property
def env(self):
return self.envs[-1]
@property
def noupdate(self):
return self._noupdate[-1]
def __init__(self, cr, module, idref, mode, noupdate=False, xml_filename=None):
self.mode = mode
self.module = module
self.envs = [odoo.api.Environment(cr, SUPERUSER_ID, {})]
self.idref = {} if idref is None else idref
self._noupdate = [noupdate]
self.xml_filename = xml_filename
self._tags = {
'record': self._tag_record,
'delete': self._tag_delete,
'function': self._tag_function,
'menuitem': self._tag_menuitem,
'template': self._tag_template,
'report': self._tag_report,
'act_window': self._tag_act_window,
**dict.fromkeys(self.DATA_ROOTS, self._tag_root)
}
def parse(self, de):
assert de.tag in self.DATA_ROOTS, "Root xml tag must be <openerp>, <odoo> or <data>."
self._tag_root(de)
DATA_ROOTS = ['odoo', 'data', 'openerp']
def convert_file(cr, module, filename, idref, mode='update', noupdate=False, kind=None, pathname=None):
if pathname is None:
pathname = os.path.join(module, filename)
ext = os.path.splitext(filename)[1].lower()
with file_open(pathname, 'rb') as fp:
if ext == '.csv':
convert_csv_import(cr, module, pathname, fp.read(), idref, mode, noupdate)
elif ext == '.sql':
convert_sql_import(cr, fp)
elif ext == '.xml':
convert_xml_import(cr, module, fp, idref, mode, noupdate)
elif ext == '.js':
pass # .js files are valid but ignored here.
else:
raise ValueError("Can't load unknown file type %s.", filename)
def convert_sql_import(cr, fp):
cr.execute(fp.read()) # pylint: disable=sql-injection
def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
noupdate=False):
'''Import csv file :
quote: "
delimiter: ,
encoding: utf-8'''
filename, _ext = os.path.splitext(os.path.basename(fname))
model = filename.split('-')[0]
reader = pycompat.csv_reader(io.BytesIO(csvcontent), quotechar='"', delimiter=',')
fields = next(reader)
if not (mode == 'init' or 'id' in fields):
_logger.error("Import specification does not contain 'id' and we are in init mode, Cannot continue.")
return
# filter out empty lines (any([]) == False) and lines containing only empty cells
datas = [
line for line in reader
if any(line)
]
context = {
'mode': mode,
'module': module,
'install_module': module,
'install_filename': fname,
'noupdate': noupdate,
}
env = odoo.api.Environment(cr, SUPERUSER_ID, context)
result = env[model].load(fields, datas)
if any(msg['type'] == 'error' for msg in result['messages']):
# Report failed import and abort module install
warning_msg = "\n".join(msg['message'] for msg in result['messages'])
raise Exception(_('Module loading %s failed: file %s could not be processed:\n %s') % (module, fname, warning_msg))
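# Illustrative sketch (added for clarity; not part of the original module):
# the target model is derived from the file name, so a data file declared as
# "data/res.partner-demo.csv" loads into ``res.partner`` and must provide the
# field names on its first row, e.g.:
#   id,name,email
#   partner_demo_1,Acme,info@acme.example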
def convert_xml_import(cr, module, xmlfile, idref=None, mode='init', noupdate=False, report=None):
doc = etree.parse(xmlfile)
schema = os.path.join(config['root_path'], 'import_xml.rng')
relaxng = etree.RelaxNG(etree.parse(schema))
try:
relaxng.assert_(doc)
except Exception:
_logger.exception("The XML file '%s' does not fit the required schema !", xmlfile.name)
if jingtrang:
p = subprocess.run(['pyjing', schema, xmlfile.name], stdout=subprocess.PIPE)
_logger.warning(p.stdout.decode())
else:
for e in relaxng.error_log:
_logger.warning(e)
_logger.info("Install 'jingtrang' for more precise and useful validation messages.")
raise
if isinstance(xmlfile, str):
xml_filename = xmlfile
else:
xml_filename = xmlfile.name
obj = xml_import(cr, module, idref, mode, noupdate=noupdate, xml_filename=xml_filename)
obj.parse(doc.getroot())
| 38.889435 | 31,656 |
8,267 | py | PYTHON | 15.0 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# decorator makes wrappers that have the same API as their wrapped function
from collections import Counter, defaultdict
from decorator import decorator
from inspect import signature
import logging
unsafe_eval = eval
_logger = logging.getLogger(__name__)
class ormcache_counter(object):
""" Statistic counters for cache entries. """
__slots__ = ['hit', 'miss', 'err']
def __init__(self):
self.hit = 0
self.miss = 0
self.err = 0
@property
def ratio(self):
return 100.0 * self.hit / (self.hit + self.miss or 1)
# statistic counters dictionary, maps (dbname, modelname, method) to counter
STAT = defaultdict(ormcache_counter)
class ormcache(object):
""" LRU cache decorator for model methods.
The parameters are strings that represent expressions referring to the
signature of the decorated method, and are used to compute a cache key::
@ormcache('model_name', 'mode')
def _compute_domain(self, model_name, mode="read"):
...
For the sake of backward compatibility, the decorator supports the named
parameter `skiparg`::
@ormcache(skiparg=1)
def _compute_domain(self, model_name, mode="read"):
...
Methods implementing this decorator should never return a Recordset,
because the underlying cursor will eventually be closed and raise a
`psycopg2.OperationalError`.
"""
def __init__(self, *args, **kwargs):
self.args = args
self.skiparg = kwargs.get('skiparg')
def __call__(self, method):
self.method = method
self.determine_key()
lookup = decorator(self.lookup, method)
lookup.clear_cache = self.clear
return lookup
def determine_key(self):
""" Determine the function that computes a cache key from arguments. """
if self.skiparg is None:
# build a string that represents function code and evaluate it
args = str(signature(self.method))[1:-1]
if self.args:
code = "lambda %s: (%s,)" % (args, ", ".join(self.args))
else:
code = "lambda %s: ()" % (args,)
self.key = unsafe_eval(code)
else:
# backward-compatible function that uses self.skiparg
self.key = lambda *args, **kwargs: args[self.skiparg:]
def lru(self, model):
counter = STAT[(model.pool.db_name, model._name, self.method)]
return model.pool._Registry__cache, (model._name, self.method), counter
def lookup(self, method, *args, **kwargs):
d, key0, counter = self.lru(args[0])
key = key0 + self.key(*args, **kwargs)
try:
r = d[key]
counter.hit += 1
return r
except KeyError:
counter.miss += 1
value = d[key] = self.method(*args, **kwargs)
return value
except TypeError:
_logger.warning("cache lookup error on %r", key, exc_info=True)
counter.err += 1
return self.method(*args, **kwargs)
def clear(self, model, *args):
""" Clear the registry cache """
model.pool._clear_cache()
class ormcache_context(ormcache):
""" This LRU cache decorator is a variant of :class:`ormcache`, with an
extra parameter ``keys`` that defines a sequence of dictionary keys. Those
keys are looked up in the ``context`` parameter and combined to the cache
key made by :class:`ormcache`.
"""
def __init__(self, *args, **kwargs):
super(ormcache_context, self).__init__(*args, **kwargs)
self.keys = kwargs['keys']
def determine_key(self):
""" Determine the function that computes a cache key from arguments. """
assert self.skiparg is None, "ormcache_context() no longer supports skiparg"
# build a string that represents function code and evaluate it
sign = signature(self.method)
args = str(sign)[1:-1]
cont_expr = "(context or {})" if 'context' in sign.parameters else "self._context"
keys_expr = "tuple(%s.get(k) for k in %r)" % (cont_expr, self.keys)
if self.args:
code = "lambda %s: (%s, %s)" % (args, ", ".join(self.args), keys_expr)
else:
code = "lambda %s: (%s,)" % (args, keys_expr)
self.key = unsafe_eval(code)
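# Illustrative sketch (assumed model method, not part of this module): the
# value of the context key 'lang' becomes part of the cache key, so each
# language gets its own entry.
#
#     @ormcache_context('model_name', keys=('lang',))
#     def _get_label(self, model_name):
#         ...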
class ormcache_multi(ormcache):
""" This LRU cache decorator is a variant of :class:`ormcache`, with an
extra parameter ``multi`` that gives the name of a parameter. Upon call, the
corresponding argument is iterated on, and every value leads to a cache
entry under its own key.
"""
def __init__(self, *args, **kwargs):
super(ormcache_multi, self).__init__(*args, **kwargs)
self.multi = kwargs['multi']
def determine_key(self):
""" Determine the function that computes a cache key from arguments. """
assert self.skiparg is None, "ormcache_multi() no longer supports skiparg"
assert isinstance(self.multi, str), "ormcache_multi() parameter multi must be an argument name"
super(ormcache_multi, self).determine_key()
# key_multi computes the extra element added to the key
sign = signature(self.method)
args = str(sign)[1:-1]
code_multi = "lambda %s: %s" % (args, self.multi)
self.key_multi = unsafe_eval(code_multi)
# self.multi_pos is the position of self.multi in args
self.multi_pos = list(sign.parameters).index(self.multi)
def lookup(self, method, *args, **kwargs):
d, key0, counter = self.lru(args[0])
base_key = key0 + self.key(*args, **kwargs)
ids = self.key_multi(*args, **kwargs)
result = {}
missed = []
# first take what is available in the cache
for i in ids:
key = base_key + (i,)
try:
result[i] = d[key]
counter.hit += 1
except Exception:
counter.miss += 1
missed.append(i)
if missed:
# call the method for the ids that were not in the cache; note that
# thanks to decorator(), the multi argument will be bound and passed
# positionally in args.
args = list(args)
args[self.multi_pos] = missed
result.update(method(*args, **kwargs))
# store those new results back in the cache
for i in missed:
key = base_key + (i,)
d[key] = result[i]
return result
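# Illustrative sketch (assumed model method, not part of this module): each
# id in ``ids`` gets its own cache entry; on later calls only the missing
# ids are recomputed and merged back into the cached results.
#
#     @ormcache_multi('mode', multi='ids')
#     def _get_allowed(self, ids, mode='read'):
#         return {id_: self._compute_allowed(id_, mode) for id_ in ids}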
class dummy_cache(object):
""" Cache decorator replacement to actually do no caching. """
def __init__(self, *l, **kw):
pass
def __call__(self, fn):
fn.clear_cache = self.clear
return fn
def clear(self, *l, **kw):
pass
def log_ormcache_stats(sig=None, frame=None):
""" Log statistics of ormcache usage by database, model, and method. """
from odoo.modules.registry import Registry
import threading
me = threading.current_thread()
me_dbname = getattr(me, 'dbname', 'n/a')
for dbname, reg in sorted(Registry.registries.d.items()):
# set logger prefix to dbname
me.dbname = dbname
entries = Counter(k[:2] for k in reg._Registry__cache.d)
# show entries sorted by model name, method name
for key in sorted(entries, key=lambda key: (key[0], key[1].__name__)):
model, method = key
stat = STAT[(dbname, model, method)]
_logger.info(
"%6d entries, %6d hit, %6d miss, %6d err, %4.1f%% ratio, for %s.%s",
entries[key], stat.hit, stat.miss, stat.err, stat.ratio, model, method.__name__,
)
me.dbname = me_dbname
def get_cache_key_counter(bound_method, *args, **kwargs):
""" Return the cache, key and stat counter for the given call. """
model = bound_method.__self__
ormcache = bound_method.clear_cache.__self__
cache, key0, counter = ormcache.lru(model)
key = key0 + ormcache.key(model, *args, **kwargs)
return cache, key, counter
# For backward compatibility
cache = ormcache
| 35.633621 | 8,267 |
7,142 | py | PYTHON | 15.0 |
from functools import lru_cache
import json
class SourceMapGenerator:
"""
The SourceMapGenerator creates the sourcemap that maps the generated asset bundle back to the original js/css files.
What is a sourcemap? (https://developer.mozilla.org/en-US/docs/Tools/Debugger/How_to/Use_a_source_map)
In brief: a source map is what makes it possible to debug your processed/compiled/minified code as if you were
debugging the original, non-altered source code. It is a file that provides a mapping original <=> processed for
the browser to read.
This implementation of the SourceMapGenerator is a translation and adaptation of the js implementation
at https://github.com/mozilla/source-map. For performance purposes, we have removed all functions/steps that are
unnecessary for our use case. This simpler version does a line-by-line mapping, with the ability to
add offsets at the start and end of a file (e.g. when we have to add comments on top of a transpiled file).
"""
def __init__(self, source_root=None):
self._file = None
self._source_root = source_root
self._sources = {}
self._mappings = []
self._sources_contents = {}
self._version = 3
self._cache = {}
def _serialize_mappings(self):
"""
A source map mapping is encoded with the base 64 VLQ format.
This function encodes the readable source to the format.
:return the encoded content
"""
previous_generated_line = 1
previous_original_line = 0
previous_source = 0
encoded_column = base64vlq_encode(0)
result = ""
for mapping in self._mappings:
if mapping["generatedLine"] != previous_generated_line:
while mapping["generatedLine"] > previous_generated_line:
result += ";"
previous_generated_line += 1
if mapping["source"] is not None:
sourceIdx = self._sources[mapping["source"]]
source = sourceIdx - previous_source
previous_source = sourceIdx
# lines are stored 0-based in SourceMap spec version 3
line = mapping["originalLine"] - 1 - previous_original_line
previous_original_line = mapping["originalLine"] - 1
if (source, line) not in self._cache:
self._cache[(source, line)] = "".join([
encoded_column,
base64vlq_encode(source),
base64vlq_encode(line),
encoded_column,
])
result += self._cache[source, line]
return result
def to_json(self):
"""
Generates the json sourcemap.
It is the main function that assembles all the pieces.
:return {str} valid sourcemap in json format
"""
mapping = {
"version": self._version,
"sources": list(self._sources.keys()),
"mappings": self._serialize_mappings(),
"sourcesContent": [self._sources_contents[source] for source in self._sources]
}
if self._file:
mapping["file"] = self._file
if self._source_root:
mapping["sourceRoot"] = self._source_root
return mapping
def get_content(self):
"""Generates the content of the sourcemap.
:return: the content of the sourcemap as UTF-8 encoded bytes, prefixed with the XSSI-prevention marker.
"""
# Store with XSSI-prevention prefix
return b")]}'\n" + json.dumps(self.to_json()).encode('utf8')
def add_source(self, source_name, source_content, last_index, start_offset=0):
"""Adds a new source file in the sourcemap. All the lines of the source file will be mapped line by line
to the generated file from the (last_index + start_offset). All lines between
last_index and (last_index + start_offset) will
be mapped to line 1 of the source file.
Example:
ls 1 = Line 1 from new source file
lg 1 = Line 1 from generated file
ls 1 <=> lg 1 Line 1 from new source file is mapped to Line 1 from generated file
nb_ls = number of lines in the new source file
Step 1:
ls 1 <=> lg last_index + 1
Step 2:
ls 1 <=> lg last_index + start_offset + 1
ls 2 <=> lg last_index + start_offset + 2
...
ls nb_ls <=> lg last_index + start_offset + nb_ls
:param source_name: name of the source to add
:param source_content: content of the source to add
:param last_index: Line where we start to map the new source
:param start_offset: Number of lines to pass in the generated file before starting mapping line by line
"""
source_line_count = len(source_content.split("\n"))
self._sources.setdefault(source_name, len(self._sources))
self._sources_contents[source_name] = source_content
if start_offset > 0:
# adds a mapping between the first line of the source
# and the first line of the corresponding code in the generated file.
self._mappings.append({
"generatedLine": last_index + 1,
"originalLine": 1,
"source": source_name,
})
for i in range(1, source_line_count + 1):
self._mappings.append({
"generatedLine": last_index + i + start_offset,
"originalLine": i,
"source": source_name,
})
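# Illustrative usage sketch (not part of the original module): driving the
# generator while concatenating two assets into one bundle. The file names and
# contents below are made up for the example.
def _example_sourcemap_usage():
    generator = SourceMapGenerator(source_root="/")
    file_a = "console.log('a');\nconsole.log('b');"
    file_b = "console.log('c');"
    # file_a occupies the first two lines of the generated bundle; file_b starts
    # after it, preceded by one extra offset line (e.g. a separator comment).
    generator.add_source("/web/static/src/a.js", file_a, last_index=0)
    generator.add_source("/web/static/src/b.js", file_b, last_index=2, start_offset=1)
    return generator.get_content()  # b")]}'\n" followed by the JSON sourcemap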
B64CHARS = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
SHIFTSIZE, FLAG, MASK = 5, 1 << 5, (1 << 5) - 1
@lru_cache(maxsize=64)
def base64vlq_encode(*values):
"""
Encode Base64 VLQ encoded sequences
https://gist.github.com/mjpieters/86b0d152bb51d5f5979346d11005588b
Base64 VLQ is used in source maps.
VLQ values consist of 6 bits (matching the 64 characters of the Base64
alphabet), with the most significant bit a *continuation* flag. If the
flag is set, then the next character in the input is part of the same
integer value. Multiple VLQ character sequences so form an unbounded
integer value, in little-endian order.
The *first* VLQ value consists of a continuation flag, 4 bits for the
value, and the last bit the *sign* of the integer:
+-----+-----+-----+-----+-----+-----+
| c | b3 | b2 | b1 | b0 | s |
+-----+-----+-----+-----+-----+-----+
while subsequent VLQ characters contain 5 bits of value:
+-----+-----+-----+-----+-----+-----+
| c | b4 | b3 | b2 | b1 | b0 |
+-----+-----+-----+-----+-----+-----+
For source maps, Base64 VLQ sequences can contain 1, 4 or 5 elements.
"""
results = []
add = results.append
for v in values:
# add sign bit
v = (abs(v) << 1) | int(v < 0)
while True:
toencode, v = v & MASK, v >> SHIFTSIZE
add(toencode | (v and FLAG))
if not v:
break
return bytes(map(B64CHARS.__getitem__, results)).decode()
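# Worked example (illustrative, not part of the original module): the deltas
# (generated column 0, source +0, original line +1, column 0) -- the segment the
# line-by-line mapper above emits for "one more line, same source" -- encode to
# the segment "AACA".
def _example_vlq_segment():
    assert base64vlq_encode(0, 0, 1, 0) == "AACA"
    return base64vlq_encode(0, 0, 1, 0)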
| 39.899441 | 7,142 |
18,285 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import io
import re
from datetime import datetime
from hashlib import md5
from logging import getLogger
from PyPDF2 import PdfFileWriter, PdfFileReader
from PyPDF2.generic import DictionaryObject, NameObject, ArrayObject, DecodedStreamObject, NumberObject, createStringObject, ByteStringObject
from zlib import compress, decompress
from PIL import Image
from reportlab.lib import colors
from reportlab.lib.units import cm
from reportlab.lib.utils import ImageReader
from reportlab.pdfgen import canvas
try:
from fontTools.ttLib import TTFont
except ImportError:
TTFont = None
from odoo.tools.misc import file_open
_logger = getLogger(__name__)
DEFAULT_PDF_DATETIME_FORMAT = "D:%Y%m%d%H%M%S+00'00'"
REGEX_SUBTYPE_UNFORMATED = re.compile(r'^\w+/[\w-]+$')
REGEX_SUBTYPE_FORMATED = re.compile(r'^/\w+#2F[\w-]+$')
# make sure values are unwrapped by calling the specialized __getitem__
def _unwrapping_get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
DictionaryObject.get = _unwrapping_get
class BrandedFileWriter(PdfFileWriter):
def __init__(self):
super().__init__()
self.addMetadata({
'/Creator': "Odoo",
'/Producer': "Odoo",
})
PdfFileWriter = BrandedFileWriter
def merge_pdf(pdf_data):
''' Merge a collection of PDF documents in one.
Note that the attachments are not merged.
:param list pdf_data: a list of PDF datastrings
:return: a unique merged PDF datastring
'''
writer = PdfFileWriter()
for document in pdf_data:
reader = PdfFileReader(io.BytesIO(document), strict=False)
for page in range(0, reader.getNumPages()):
writer.addPage(reader.getPage(page))
with io.BytesIO() as _buffer:
writer.write(_buffer)
return _buffer.getvalue()
def rotate_pdf(pdf):
''' Rotate a PDF clockwise (90°) into a new PDF.
Note that the attachments are not copied.
:param pdf: a PDF to rotate
:return: a PDF rotated
'''
writer = PdfFileWriter()
reader = PdfFileReader(io.BytesIO(pdf), strict=False)
for page in range(0, reader.getNumPages()):
page = reader.getPage(page)
page.rotateClockwise(90)
writer.addPage(page)
with io.BytesIO() as _buffer:
writer.write(_buffer)
return _buffer.getvalue()
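# Illustrative usage sketch (not part of the original module): both helpers work
# on raw PDF bytes. The file paths below are hypothetical.
def _example_merge_and_rotate(paths=('/tmp/a.pdf', '/tmp/b.pdf')):
    documents = []
    for path in paths:
        with open(path, 'rb') as f:
            documents.append(f.read())
    merged = merge_pdf(documents)   # one PDF holding the pages of both inputs
    return rotate_pdf(merged)       # same pages, rotated 90° clockwise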
def add_banner(pdf_stream, text=None, logo=False, thickness=2 * cm):
""" Add a banner on a PDF in the upper right corner, with Odoo's logo (optionally).
:param pdf_stream (BytesIO): The PDF stream where the banner will be applied.
:param text (str): The text to be displayed.
:param logo (bool): Whether to display Odoo's logo in the banner.
:param thickness (float): The thickness of the banner in pixels.
:return (BytesIO): The modified PDF stream.
"""
old_pdf = PdfFileReader(pdf_stream, strict=False, overwriteWarnings=False)
packet = io.BytesIO()
can = canvas.Canvas(packet)
odoo_logo = Image.open(file_open('base/static/img/main_partner-image.png', mode='rb'))
odoo_color = colors.Color(113 / 255, 75 / 255, 103 / 255, 0.8)
for p in range(old_pdf.getNumPages()):
page = old_pdf.getPage(p)
width = float(abs(page.mediaBox.getWidth()))
height = float(abs(page.mediaBox.getHeight()))
can.translate(width, height)
can.rotate(-45)
# Draw banner
path = can.beginPath()
path.moveTo(-width, -thickness)
path.lineTo(-width, -2 * thickness)
path.lineTo(width, -2 * thickness)
path.lineTo(width, -thickness)
can.setFillColor(odoo_color)
can.drawPath(path, fill=1, stroke=False)
# Insert text (and logo) inside the banner
can.setFontSize(10)
can.setFillColor(colors.white)
can.drawRightString(0.75 * thickness, -1.45 * thickness, text)
logo and can.drawImage(
ImageReader(odoo_logo), 0.25 * thickness, -2.05 * thickness, 40, 40, mask='auto', preserveAspectRatio=True)
can.showPage()
can.save()
# Merge the old pages with the watermark
watermark_pdf = PdfFileReader(packet, overwriteWarnings=False)
new_pdf = PdfFileWriter()
for p in range(old_pdf.getNumPages()):
new_page = old_pdf.getPage(p)
# Remove annotations (if any), to prevent errors in PyPDF2
if '/Annots' in new_page:
del new_page['/Annots']
new_page.mergePage(watermark_pdf.getPage(p))
new_pdf.addPage(new_page)
# Write the new pdf into a new output stream
output = io.BytesIO()
new_pdf.write(output)
return output
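# Illustrative usage sketch (not part of the original module): stamping a banner
# on an existing document. The input path is hypothetical.
def _example_add_banner(path='/tmp/quotation.pdf'):
    with open(path, 'rb') as f:
        stream = io.BytesIO(f.read())
    banner_stream = add_banner(stream, text="Demo document", logo=True)
    return banner_stream.getvalue()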
# by default PdfFileReader will overwrite warnings.showwarning which is what
# logging.captureWarnings does, meaning it essentially reverts captureWarnings
# every time it's called which is undesirable
old_init = PdfFileReader.__init__
PdfFileReader.__init__ = lambda self, stream, strict=True, warndest=None, overwriteWarnings=True: \
old_init(self, stream=stream, strict=strict, warndest=None, overwriteWarnings=False)
class OdooPdfFileReader(PdfFileReader):
# OVERRIDE of PdfFileReader to add the management of multiple embedded files.
def getAttachments(self):
''' Returns the files inside the PDF.
:raises NotImplementedError: if document is encrypted and uses an unsupported encryption method.
'''
if self.isEncrypted:
# If the PDF is owner-encrypted, try to unwrap it by giving it an empty user password.
self.decrypt('')
try:
file_path = self.trailer["/Root"].get("/Names", {}).get("/EmbeddedFiles", {}).get("/Names")
if not file_path:
return []
for i in range(0, len(file_path), 2):
attachment = file_path[i+1].getObject()
yield (attachment["/F"], attachment["/EF"]["/F"].getObject().getData())
except Exception:
# malformed pdf (i.e. invalid xref page)
return []
class OdooPdfFileWriter(PdfFileWriter):
def __init__(self, *args, **kwargs):
"""
Override of the init to initialise additional variables.
:param pdf_content: if given, will initialise the reader with the pdf content.
"""
super().__init__(*args, **kwargs)
self._reader = None
self.is_pdfa = False
def addAttachment(self, name, data, subtype=None):
"""
Add an attachment to the pdf. Supports adding multiple attachments, while respecting PDF/A rules.
:param name: The name of the attachment
:param data: The data of the attachment
:param subtype: The mime-type of the attachment. This is required by PDF/A, but not essential otherwise.
It should take the form of "/xxx#2Fxxx". E.g. for "text/xml": "/text#2Fxml"
"""
adapted_subtype = subtype
if subtype:
# If we receive the subtype in an 'unformated' (mimetype) format, we'll try to convert it to a pdf-valid one
if REGEX_SUBTYPE_UNFORMATED.match(subtype):
adapted_subtype = '/' + subtype.replace('/', '#2F')
if not REGEX_SUBTYPE_FORMATED.match(adapted_subtype):
# The subtype still does not match the correct format, so we will not add it to the document
_logger.warning("Attempt to add an attachment with the incorrect subtype '%s'. The subtype will be ignored.", subtype)
adapted_subtype = ''
attachment = self._create_attachment_object({
'filename': name,
'content': data,
'subtype': adapted_subtype,
})
if self._root_object.get('/Names') and self._root_object['/Names'].get('/EmbeddedFiles'):
names_array = self._root_object["/Names"]["/EmbeddedFiles"]["/Names"]
names_array.extend([attachment.getObject()['/F'], attachment])
else:
names_array = ArrayObject()
names_array.extend([attachment.getObject()['/F'], attachment])
embedded_files_names_dictionary = DictionaryObject()
embedded_files_names_dictionary.update({
NameObject("/Names"): names_array
})
embedded_files_dictionary = DictionaryObject()
embedded_files_dictionary.update({
NameObject("/EmbeddedFiles"): embedded_files_names_dictionary
})
self._root_object.update({
NameObject("/Names"): embedded_files_dictionary
})
if self._root_object.get('/AF'):
attachment_array = self._root_object['/AF']
attachment_array.extend([attachment])
else:
# Create a new object containing an array referencing embedded file
# And reference this array in the root catalogue
attachment_array = self._addObject(ArrayObject([attachment]))
self._root_object.update({
NameObject("/AF"): attachment_array
})
def embed_odoo_attachment(self, attachment, subtype=None):
assert attachment, "embed_odoo_attachment cannot be called without attachment."
self.addAttachment(attachment.name, attachment.raw, subtype=subtype or attachment.mimetype)
def cloneReaderDocumentRoot(self, reader):
super().cloneReaderDocumentRoot(reader)
self._reader = reader
# Try to read the header coming in, and reuse it in our new PDF
# This is done in order to allows modifying PDF/A files after creating them (as PyPDF does not read it)
stream = reader.stream
stream.seek(0)
header = stream.readlines(9)
# Should always be true, the first line of a pdf should have 9 bytes (%PDF-1.x plus a newline)
if len(header) == 1:
# If we found a header, set it back to the new pdf
self._header = header[0]
# Also check the second line. If it is PDF/A, it should be a line starting by % following by four bytes + \n
second_line = stream.readlines(1)[0]
if second_line.decode('latin-1')[0] == '%' and len(second_line) == 6:
self._header += second_line
self.is_pdfa = True
# Look if we have an ID in the incoming stream and use it.
pdf_id = reader.trailer.get('/ID', None)
if pdf_id:
self._ID = pdf_id
def convert_to_pdfa(self):
"""
Transform the opened PDF file into a PDF/A compliant file
"""
# Set the PDF version to 1.7 (as PDF/A-3 is based on version 1.7) and make it PDF/A compliant.
# See https://github.com/veraPDF/veraPDF-validation-profiles/wiki/PDFA-Parts-2-and-3-rules#rule-612-1
# " The file header shall begin at byte zero and shall consist of "%PDF-1.n" followed by a single EOL marker,
# where 'n' is a single digit number between 0 (30h) and 7 (37h) "
# " The aforementioned EOL marker shall be immediately followed by a % (25h) character followed by at least four
# bytes, each of whose encoded byte values shall have a decimal value greater than 127 "
self._header = b"%PDF-1.7\n%\xFF\xFF\xFF\xFF"
# Add a document ID to the trailer. This is only needed when using encryption with regular PDF, but is required
# when using PDF/A
pdf_id = ByteStringObject(md5(self._reader.stream.getvalue()).digest())
# The first string is based on the content at the time of creating the file, while the second is based on the
# content of the file when it was last updated. When creating a PDF, both are set to the same value.
self._ID = ArrayObject((pdf_id, pdf_id))
with file_open('tools/data/files/sRGB2014.icc', mode='rb') as icc_profile:
icc_profile_file_data = compress(icc_profile.read())
icc_profile_stream_obj = DecodedStreamObject()
icc_profile_stream_obj.setData(icc_profile_file_data)
icc_profile_stream_obj.update({
NameObject("/Filter"): NameObject("/FlateDecode"),
NameObject("/N"): NumberObject(3),
NameObject("/Length"): NameObject(str(len(icc_profile_file_data))),
})
icc_profile_obj = self._addObject(icc_profile_stream_obj)
output_intent_dict_obj = DictionaryObject()
output_intent_dict_obj.update({
NameObject("/S"): NameObject("/GTS_PDFA1"),
NameObject("/OutputConditionIdentifier"): createStringObject("sRGB"),
NameObject("/DestOutputProfile"): icc_profile_obj,
NameObject("/Type"): NameObject("/OutputIntent"),
})
output_intent_obj = self._addObject(output_intent_dict_obj)
self._root_object.update({
NameObject("/OutputIntents"): ArrayObject([output_intent_obj]),
})
pages = self._root_object['/Pages']['/Kids']
# PDF/A needs the glyphs width array embedded in the pdf to be consistent with the ones from the font file.
# But it seems like it is not the case when exporting from wkhtmltopdf.
if TTFont:
fonts = {}
# First browse through all the pages of the pdf file, to get a reference to all the fonts used in the PDF.
for page in pages:
for font in page.getObject()['/Resources']['/Font'].values():
for descendant in font.getObject()['/DescendantFonts']:
fonts[descendant.idnum] = descendant.getObject()
# Then for each font, rewrite the width array with the information taken directly from the font file.
# The new width are calculated such as width = round(1000 * font_glyph_width / font_units_per_em)
# See: http://martin.hoppenheit.info/blog/2018/pdfa-validation-and-inconsistent-glyph-width-information/
for font in fonts.values():
font_file = font['/FontDescriptor']['/FontFile2']
stream = io.BytesIO(decompress(font_file._data))
ttfont = TTFont(stream)
font_upm = ttfont['head'].unitsPerEm
glyphs = ttfont.getGlyphSet()._hmtx.metrics
glyph_widths = []
for key, values in glyphs.items():
if key[:5] == 'glyph':
glyph_widths.append(NumberObject(round(1000.0 * values[0] / font_upm)))
font[NameObject('/W')] = ArrayObject([NumberObject(1), ArrayObject(glyph_widths)])
stream.close()
else:
_logger.warning('The fonttools package is not installed. Generated PDF may not be PDF/A compliant.')
outlines = self._root_object['/Outlines'].getObject()
outlines[NameObject('/Count')] = NumberObject(1)
# Set odoo as producer
self.addMetadata({
'/Creator': "Odoo",
'/Producer': "Odoo",
})
self.is_pdfa = True
def add_file_metadata(self, metadata_content):
"""
Set the XMP metadata of the pdf, wrapping it with the necessary XMP header/footer.
These are required for a PDF/A file to be completely compliant. Omitting them would result in validation errors.
:param metadata_content: bytes of the metadata to add to the pdf.
"""
# See https://wwwimages2.adobe.com/content/dam/acom/en/devnet/xmp/pdfs/XMP%20SDK%20Release%20cc-2016-08/XMPSpecificationPart1.pdf
# Page 10/11
header = b'<?xpacket begin="" id="W5M0MpCehiHzreSzNTczkc9d"?>'
footer = b'<?xpacket end="w"?>'
metadata = b'%s%s%s' % (header, metadata_content, footer)
file_entry = DecodedStreamObject()
file_entry.setData(metadata)
file_entry.update({
NameObject("/Type"): NameObject("/Metadata"),
NameObject("/Subtype"): NameObject("/XML"),
NameObject("/Length"): NameObject(str(len(metadata))),
})
# Add the new metadata to the pdf, then redirect the reference to refer to this new object.
metadata_object = self._addObject(file_entry)
self._root_object.update({NameObject("/Metadata"): metadata_object})
def _create_attachment_object(self, attachment):
''' Create a PyPDF2.generic object representing an embedded file.
:param attachment: A dictionary containing:
* filename: The name of the file to embed (required)
* content: The bytes of the file to embed (required)
* subtype: The mime-type of the file to embed (optional)
:return:
'''
file_entry = DecodedStreamObject()
file_entry.setData(attachment['content'])
file_entry.update({
NameObject("/Type"): NameObject("/EmbeddedFile"),
NameObject("/Params"):
DictionaryObject({
NameObject('/CheckSum'): createStringObject(md5(attachment['content']).hexdigest()),
NameObject('/ModDate'): createStringObject(datetime.now().strftime(DEFAULT_PDF_DATETIME_FORMAT)),
NameObject('/Size'): NameObject(str(len(attachment['content']))),
}),
})
if attachment.get('subtype'):
file_entry.update({
NameObject("/Subtype"): NameObject(attachment['subtype']),
})
file_entry_object = self._addObject(file_entry)
filename_object = createStringObject(attachment['filename'])
filespec_object = DictionaryObject({
NameObject("/AFRelationship"): NameObject("/Data"),
NameObject("/Type"): NameObject("/Filespec"),
NameObject("/F"): filename_object,
NameObject("/EF"):
DictionaryObject({
NameObject("/F"): file_entry_object,
NameObject('/UF'): file_entry_object,
}),
NameObject("/UF"): filename_object,
})
if attachment.get('description'):
filespec_object.update({NameObject("/Desc"): createStringObject(attachment['description'])})
return self._addObject(filespec_object)
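# Illustrative round-trip sketch (not part of the original module): embed an XML
# attachment in a PDF, then read it back. `pdf_bytes` is a hypothetical input;
# the subtype is given in plain mimetype form, which addAttachment converts.
def _example_embed_and_read(pdf_bytes):
    reader = OdooPdfFileReader(io.BytesIO(pdf_bytes), strict=False)
    writer = OdooPdfFileWriter()
    writer.cloneReaderDocumentRoot(reader)
    writer.addAttachment('factur-x.xml', b'<Invoice/>', subtype='text/xml')
    with io.BytesIO() as buffer:
        writer.write(buffer)
        new_bytes = buffer.getvalue()
    new_reader = OdooPdfFileReader(io.BytesIO(new_bytes), strict=False)
    return list(new_reader.getAttachments())  # [('factur-x.xml', b'<Invoice/>')]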
| 43.533333 | 18,284 |
7,966 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
"""
Mimetypes-related utilities
# TODO: reexport stdlib mimetypes?
"""
import collections
import functools
import io
import logging
import re
import zipfile
__all__ = ['guess_mimetype']
_logger = logging.getLogger(__name__)
# We define our own guess_mimetype implementation and if magic is available we
# use it instead.
# discriminants for zip-based file formats
_ooxml_dirs = {
'word/': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'pt/': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'xl/': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
}
def _check_ooxml(data):
with io.BytesIO(data) as f, zipfile.ZipFile(f) as z:
filenames = z.namelist()
# OOXML documents should have a [Content_Types].xml file for early
# check that we're interested in this thing at all
if '[Content_Types].xml' not in filenames:
return False
# then there is a directory whose name denotes the type of the file:
# word, pt (powerpoint) or xl (excel)
for dirname, mime in _ooxml_dirs.items():
if any(entry.startswith(dirname) for entry in filenames):
return mime
return False
# checks that a string looks kinda sorta like a mimetype
_mime_validator = re.compile(r"""
[\w-]+ # type-name
/ # subtype separator
[\w-]+ # registration facet or subtype
(?:\.[\w-]+)* # optional faceted name
(?:\+[\w-]+)? # optional structured syntax specifier
""", re.VERBOSE)
def _check_open_container_format(data):
# Open Document Format for Office Applications (OpenDocument) Version 1.2
#
# Part 3: Packages
# 3 Packages
# 3.3 MIME Media Type
with io.BytesIO(data) as f, zipfile.ZipFile(f) as z:
# If a MIME media type for a document exists, then an OpenDocument
# package should contain a file with name "mimetype".
if 'mimetype' not in z.namelist():
return False
# The content of this file shall be the ASCII encoded MIME media type
# associated with the document.
marcel = z.read('mimetype').decode('ascii')
# check that it's not too long (RFC6838 § 4.2 restricts type and
# subtype to 127 characters each + separator, strongly recommends
# limiting them to 64 but does not require it) and that it looks a lot
# like a valid mime type
if len(marcel) < 256 and _mime_validator.match(marcel):
return marcel
return False
_xls_pattern = re.compile(b"""
\x09\x08\x10\x00\x00\x06\x05\x00
| \xFD\xFF\xFF\xFF(\x10|\x1F|\x20|"|\\#|\\(|\\))
""", re.VERBOSE)
_ppt_pattern = re.compile(b"""
\x00\x6E\x1E\xF0
| \x0F\x00\xE8\x03
| \xA0\x46\x1D\xF0
| \xFD\xFF\xFF\xFF(\x0E|\x1C|\x43)\x00\x00\x00
""", re.VERBOSE)
def _check_olecf(data):
""" Pre-OOXML Office formats are OLE Compound Files which all use the same
file signature ("magic bytes") and should have a subheader at offset 512
(0x200).
Subheaders taken from http://www.garykessler.net/library/file_sigs.html
according to which Mac office files *may* have different subheaders. We'll
ignore that.
"""
offset = 0x200
if data.startswith(b'\xEC\xA5\xC1\x00', offset):
return 'application/msword'
# the _xls_pattern stuff doesn't seem to work correctly (the test file
# only has a bunch of \xf* at offset 0x200), that apparently works
elif b'Microsoft Excel' in data:
return 'application/vnd.ms-excel'
elif _ppt_pattern.match(data, offset):
return 'application/vnd.ms-powerpoint'
return False
def _check_svg(data):
"""This simply checks the existence of the opening and ending SVG tags"""
if b'<svg' in data and b'/svg' in data:
return 'image/svg+xml'
# for "master" formats with many subformats, discriminants is a list of
# functions, tried in order and the first non-falsy value returned is the
# selected mime type. If all functions return falsy values, the master
# mimetype is returned.
_Entry = collections.namedtuple('_Entry', ['mimetype', 'signatures', 'discriminants'])
_mime_mappings = (
# pdf
_Entry('application/pdf', [b'%PDF'], []),
# jpg, jpeg, png, gif, bmp, jfif
_Entry('image/jpeg', [b'\xFF\xD8\xFF\xE0', b'\xFF\xD8\xFF\xE2', b'\xFF\xD8\xFF\xE3', b'\xFF\xD8\xFF\xE1', b'\xFF\xD8\xFF\xDB'], []),
_Entry('image/png', [b'\x89PNG\r\n\x1A\n'], []),
_Entry('image/gif', [b'GIF87a', b'GIF89a'], []),
_Entry('image/bmp', [b'BM'], []),
_Entry('application/xml', [b'<'], [
_check_svg,
]),
_Entry('image/x-icon', [b'\x00\x00\x01\x00'], []),
# OLECF files in general (Word, Excel, PPT, default to word because why not?)
_Entry('application/msword', [b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1', b'\x0D\x44\x4F\x43'], [
_check_olecf
]),
# zip, but will include jar, odt, ods, odp, docx, xlsx, pptx, apk
_Entry('application/zip', [b'PK\x03\x04'], [_check_ooxml, _check_open_container_format]),
)
def _odoo_guess_mimetype(bin_data, default='application/octet-stream'):
""" Attempts to guess the mime type of the provided binary data, similar
to but significantly more limited than libmagic
:param str bin_data: binary data to try and guess a mime type for
:returns: matched mimetype or ``application/octet-stream`` if none matched
"""
# by default, guess the type using the magic number of file hex signature (like magic, but more limited)
# see http://www.filesignatures.net/ for file signatures
for entry in _mime_mappings:
for signature in entry.signatures:
if bin_data.startswith(signature):
for discriminant in entry.discriminants:
try:
guess = discriminant(bin_data)
if guess: return guess
except Exception:
# log-and-next
_logger.getChild('guess_mimetype').warn(
"Sub-checker '%s' of type '%s' failed",
discriminant.__name__, entry.mimetype,
exc_info=True
)
# if no discriminant or no discriminant matches, return
# primary mime type
return entry.mimetype
return default
try:
import magic
except ImportError:
magic = None
if magic:
# There are 2 python libs named 'magic' with incompatible api.
# magic from pypi https://pypi.python.org/pypi/python-magic/
if hasattr(magic, 'from_buffer'):
_guesser = functools.partial(magic.from_buffer, mime=True)
# magic from file(1) https://packages.debian.org/squeeze/python-magic
elif hasattr(magic, 'open'):
ms = magic.open(magic.MAGIC_MIME_TYPE)
ms.load()
_guesser = ms.buffer
def guess_mimetype(bin_data, default=None):
mimetype = _guesser(bin_data[:1024])
# upgrade incorrect mimetype to official one, fixed upstream
# https://github.com/file/file/commit/1a08bb5c235700ba623ffa6f3c95938fe295b262
if mimetype == 'image/svg':
return 'image/svg+xml'
return mimetype
else:
guess_mimetype = _odoo_guess_mimetype
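# Illustrative checks (not part of the original module): the fallback guesser is
# fully determined by the signature table above; the public guess_mimetype may
# defer to python-magic when it is installed, usually with the same results.
def _example_guess_mimetype():
    assert _odoo_guess_mimetype(b'%PDF-1.4 ...') == 'application/pdf'
    assert _odoo_guess_mimetype(b'GIF89a...') == 'image/gif'
    assert _odoo_guess_mimetype(b'garbage') == 'application/octet-stream'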
def neuter_mimetype(mimetype, user):
wrong_type = 'ht' in mimetype or 'xml' in mimetype or 'svg' in mimetype
if wrong_type and not user._is_system():
return 'text/plain'
return mimetype
def get_extension(filename):
""" Return the extension the current filename based on the heuristic that
ext is less than or equal to 10 chars and is alphanumeric.
:param str filename: filename to try and guess a extension for
:returns: detected extension or ``
"""
ext = '.' in filename and filename.split('.')[-1]
return ext and len(ext) <= 10 and ext.isalnum() and '.' + ext.lower() or ''
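# Illustrative examples (not part of the original module):
def _example_get_extension():
    assert get_extension('report.final.PDF') == '.pdf'
    assert get_extension('archive.tar.gz') == '.gz'
    assert get_extension('README') == ''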
| 38.110048 | 7,965 |
1,038 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import locale
import time
import datetime
if not hasattr(locale, 'D_FMT'):
locale.D_FMT = 1
if not hasattr(locale, 'T_FMT'):
locale.T_FMT = 2
if not hasattr(locale, 'nl_langinfo'):
def nl_langinfo(param):
if param == locale.D_FMT:
val = time.strptime('30/12/2004', '%d/%m/%Y')
dt = datetime.datetime(*val[:-2])
format_date = dt.strftime('%x')
for x, y in [('30', '%d'),('12', '%m'),('2004','%Y'),('04', '%Y')]:
format_date = format_date.replace(x, y)
return format_date
if param == locale.T_FMT:
val = time.strptime('13:24:56', '%H:%M:%S')
dt = datetime.datetime(*val[:-2])
format_time = dt.strftime('%X')
for x, y in [('13', '%H'),('24', '%M'),('56','%S')]:
format_time = format_time.replace(x, y)
return format_time
locale.nl_langinfo = nl_langinfo
| 34.6 | 1,038 |
711 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import importlib
import logging
import types
_logger = logging.getLogger(__name__)
SUPPORTED_DEBUGGER = {'pdb', 'ipdb', 'wdb', 'pudb'}
def post_mortem(config, info):
if config['dev_mode'] and isinstance(info[2], types.TracebackType):
debug = next((opt for opt in config['dev_mode'] if opt in SUPPORTED_DEBUGGER), None)
if debug:
try:
# Try to import the xpdb from config (pdb, ipdb, pudb, ...)
importlib.import_module(debug).post_mortem(info[2])
except ImportError:
_logger.error('Error while importing %s.' % debug)
| 37.421053 | 711 |
21,332 |
py
|
PYTHON
|
15.0
|
"""
This code is what let us use ES6-style modules in odoo.
Classic Odoo modules are composed of a top-level :samp:`odoo.define({name},{body_function})` call.
This processor will take files starting with an `@odoo-module` annotation (in a comment) and convert them to classic modules.
If any file has the /** @odoo-module */ annotation on top of it, it will get processed by this transpiler.
It performs several operations to get from ES6 syntax to the usual odoo one with minimal changes.
This is done on the fly, this not a pre-processing tool.
Caveat: This is done without a full parser, only using regex. One can only expect to cover as many edge cases
as possible with reasonable limitations. Also, this only changes imports and exports, so all JS features used in
the original source need to be supported by the browsers.
"""
import re
from functools import partial
def transpile_javascript(url, content):
"""
Transpile the code from native JS modules to custom odoo modules.
:param content: The original source code
:param url: The url of the file in the project
:return: The transpiled source code
"""
module_path = url_to_module_path(url)
legacy_odoo_define = get_aliased_odoo_define_content(module_path, content)
# The order of the operations does sometimes matter.
steps = [
convert_legacy_default_import,
convert_basic_import,
convert_default_import,
convert_star_import,
convert_unnamed_relative_import,
convert_from_export,
convert_star_from_export,
partial(convert_relative_require, url),
remove_index,
convert_export_function,
convert_export_class,
convert_variable_export,
convert_object_export,
convert_default_export,
partial(wrap_with_odoo_define, module_path),
]
for s in steps:
content = s(content)
if legacy_odoo_define:
content += legacy_odoo_define
return content
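# Illustrative sketch (not part of the original module): running a minimal ES6
# module through the pipeline above. The URL and source are made up.
def _example_transpile():
    source = (
        '/** @odoo-module */\n'
        'import { one } from "@web/other";\n'
        'export function two() { return one + 1; }\n'
    )
    return transpile_javascript("/web/static/src/demo.js", source)
    # -> odoo.define("@web/demo", ...) wrapping
    #    const { one } = require("@web/other");
    #    __exports.two = two; function two() { return one + 1; }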
URL_RE = re.compile(r"""
/?(?P<module>\S+) # /module name
/([\S/]*/)?static/ # ... /static/
(?P<type>src|tests|lib) # src, test, or lib file
(?P<url>/[\S/]*) # URL (/...)
""", re.VERBOSE)
def url_to_module_path(url):
"""
Odoo modules each have a name: odoo.define("<the name>", async function (require) {...});
That name is used to require them later: const { something } = require("<the name>").
The transpiler transforms the url of the file in the project to this name.
It takes the module name, adds a @ at the start of it, and maps it to be the source of the static/src (or
static/tests, or static/lib) folder in that module.
in: web/static/src/one/two/three.js
out: @web/one/two/three.js
The module would therefore be defined and required by this path.
:param url: an url in the project
:return: a special path starting with @<module-name>.
"""
match = URL_RE.match(url)
if match:
url = match["url"]
if url.endswith(('/index.js', '/index')):
url, _ = url.rsplit('/', 1)
if url.endswith('.js'):
url = url[:-3]
if match["type"] == "src":
return "@%s%s" % (match['module'], url)
elif match["type"] == "lib":
return "@%s/../lib%s" % (match['module'], url)
else:
return "@%s/../tests%s" % (match['module'], url)
else:
raise ValueError("The js file %r must be in the folder '/static/src' or '/static/lib' or '/static/tests'" % url)
def wrap_with_odoo_define(module_path, content):
"""
Wraps the current content (source code) with the odoo.define call.
Should logically be called once all other operations have been performed.
"""
return f"""odoo.define({module_path!r}, async function (require) {{
'use strict';
let __exports = {{}};
{content}
return __exports;
}});
"""
EXPORT_FCT_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
export\s+ # export
(?P<type>(async\s+)?function)\s+ # async function or function
(?P<identifier>\w+) # name the function
""", re.MULTILINE | re.VERBOSE)
def convert_export_function(content):
"""
Transpile functions that are being exported.
.. code-block:: javascript
// before
export function name
// after
__exports.name = name; function name
// before
export async function name
// after
__exports.name = name; async function name
"""
repl = r"\g<space>__exports.\g<identifier> = \g<identifier>; \g<type> \g<identifier>"
return EXPORT_FCT_RE.sub(repl, content)
EXPORT_CLASS_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
export\s+ # export
(?P<type>class)\s+ # class
(?P<identifier>\w+) # name of the class
""", re.MULTILINE | re.VERBOSE)
def convert_export_class(content):
"""
Transpile classes that are being exported.
.. code-block:: javascript
// before
export class name
// after
const name = __exports.name = class name
"""
repl = r"\g<space>const \g<identifier> = __exports.\g<identifier> = \g<type> \g<identifier>"
return EXPORT_CLASS_RE.sub(repl, content)
EXPORT_FCT_DEFAULT_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
export\s+default\s+ # export default
(?P<type>(async\s+)?function)\s+ # async function or function
(?P<identifier>\w+) # name of the function
""", re.MULTILINE | re.VERBOSE)
def convert_export_function_default(content):
"""
Transpile functions that are being exported as default value.
.. code-block:: javascript
// before
export default function name
// after
__exports[Symbol.for("default")] = name; function name
// before
export default async function name
// after
__exports[Symbol.for("default")] = name; async function name
"""
repl = r"""\g<space>__exports[Symbol.for("default")] = \g<identifier>; \g<type> \g<identifier>"""
return EXPORT_FCT_DEFAULT_RE.sub(repl, content)
EXPORT_CLASS_DEFAULT_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
export\s+default\s+ # export default
(?P<type>class)\s+ # class
(?P<identifier>\w+) # name of the class or the function
""", re.MULTILINE | re.VERBOSE)
def convert_export_class_default(content):
"""
Transpile classes that are being exported as default value.
.. code-block:: javascript
// before
export default class name
// after
const name = __exports[Symbol.for("default")] = class name
"""
repl = r"""\g<space>const \g<identifier> = __exports[Symbol.for("default")] = \g<type> \g<identifier>"""
return EXPORT_CLASS_DEFAULT_RE.sub(repl, content)
EXPORT_VAR_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
export\s+ # export
(?P<type>let|const|var)\s+ # let or cont or var
(?P<identifier>\w+) # variable name
""", re.MULTILINE | re.VERBOSE)
def convert_variable_export(content):
"""
Transpile variables that are being exported.
.. code-block:: javascript
// before
export let name
// after
let name = __exports.name
// (same with var and const)
"""
repl = r"\g<space>\g<type> \g<identifier> = __exports.\g<identifier>"
return EXPORT_VAR_RE.sub(repl, content)
EXPORT_DEFAULT_VAR_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
export\s+default\s+ # export default
(?P<type>let|const|var)\s+ # let or const or var
(?P<identifier>\w+)\s* # variable name
""", re.MULTILINE | re.VERBOSE)
def convert_variable_export_default(content):
"""
Transpile the variables that are exported as default values.
.. code-block:: javascript
// before
export default let name
// after
let name = __exports[Symbol.for("default")]
"""
repl = r"""\g<space>\g<type> \g<identifier> = __exports[Symbol.for("default")]"""
return EXPORT_DEFAULT_VAR_RE.sub(repl, content)
EXPORT_OBJECT_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
export\s* # export
(?P<object>{[\w\s,]+}) # { a, b, c as x, ... }
""", re.MULTILINE | re.VERBOSE)
def convert_object_export(content):
"""
Transpile exports of multiple elements
.. code-block:: javascript
// before
export { a, b, c as x }
// after
Object.assign(__exports, { a, b, x: c })
"""
def repl(matchobj):
object_process = "{" + ", ".join([convert_as(val) for val in matchobj["object"][1:-1].split(",")]) + "}"
space = matchobj["space"]
return f"{space}Object.assign(__exports, {object_process})"
return EXPORT_OBJECT_RE.sub(repl, content)
EXPORT_FROM_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
export\s* # export
(?P<object>{[\w\s,]+})\s* # { a, b, c as x, ... }
from\s* # from
(?P<path>(?P<quote>["'`])([^"'`]+)(?P=quote)) # "file path" ("some/path.js")
""", re.MULTILINE | re.VERBOSE)
def convert_from_export(content):
"""
Transpile exports coming from another source
.. code-block:: javascript
// before
export { a, b, c as x } from "some/path.js"
// after
{const { a, b, c } = require("some/path.js"); Object.assign(__exports, { a, b, x: c })}
"""
def repl(matchobj):
object_clean = "{" + ",".join([remove_as(val) for val in matchobj["object"][1:-1].split(",")]) + "}"
object_process = "{" + ", ".join([convert_as(val) for val in matchobj["object"][1:-1].split(",")]) + "}"
return "%(space)s{const %(object_clean)s = require(%(path)s);Object.assign(__exports, %(object_process)s)}" % {
'object_clean': object_clean,
'object_process': object_process,
'space': matchobj['space'],
'path': matchobj['path'],
}
return EXPORT_FROM_RE.sub(repl, content)
EXPORT_STAR_FROM_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
export\s*\*\s*from\s* # export * from
(?P<path>(?P<quote>["'`])([^"'`]+)(?P=quote)) # "file path" ("some/path.js")
""", re.MULTILINE | re.VERBOSE)
def convert_star_from_export(content):
"""
Transpile exports star coming from another source
.. code-block:: javascript
// before
export * from "some/path.js"
// after
Object.assign(__exports, require("some/path.js"))
"""
repl = r"\g<space>Object.assign(__exports, require(\g<path>))"
return EXPORT_STAR_FROM_RE.sub(repl, content)
EXPORT_DEFAULT_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
export\s+default # export default
(\s+\w+\s*=)? # something (optional)
""", re.MULTILINE | re.VERBOSE)
def convert_default_export(content):
"""
This function handles the default exports.
Either by calling the specialised default converters, and if any default export is left, doing a simple replacement.
(see convert_export_function_default, convert_export_class_default and convert_variable_export_default).
.. code-block:: javascript
// before
export default
// after
__exports[Symbol.for("default")] =
.. code-block:: javascript
// before
export default something =
// after
__exports[Symbol.for("default")] =
"""
new_content = convert_export_function_default(content)
new_content = convert_export_class_default(new_content)
new_content = convert_variable_export_default(new_content)
repl = r"""\g<space>__exports[Symbol.for("default")] ="""
return EXPORT_DEFAULT_RE.sub(repl, new_content)
IMPORT_BASIC_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
import\s+ # import
(?P<object>{(\s*\w+\s*,?\s*)+})\s* # { a, b, c as x, ... }
from\s* # from
(?P<path>(?P<quote>["'`])([^"'`]+)(?P=quote)) # "file path" ("some/path")
""", re.MULTILINE | re.VERBOSE)
def convert_basic_import(content):
"""
Transpile the simpler import call.
.. code-block:: javascript
// before
import { a, b, c as x } from "some/path"
// after
const {a, b, c: x} = require("some/path")
"""
def repl(matchobj):
new_object = matchobj["object"].replace(" as ", ": ")
return f"{matchobj['space']}const {new_object} = require({matchobj['path']})"
return IMPORT_BASIC_RE.sub(repl, content)
IMPORT_LEGACY_DEFAULT_RE = re.compile(r"""
^
(?P<space>\s*) # space and empty line
import\s+ # import
(?P<identifier>\w+)\s* # default variable name
from\s* # from
(?P<path>(?P<quote>["'`])([^@\."'`][^"'`]*)(?P=quote)) # legacy alias file ("addon_name.module_name" or "some/path")
""", re.MULTILINE | re.VERBOSE)
def convert_legacy_default_import(content):
"""
Transpile legacy imports (that were used as they were default import).
Legacy imports means that their name is not a path but a <addon_name>.<module_name>.
It requires slightly different processing.
.. code-block:: javascript
// before
import module_name from "addon.module_name"
// after
const module_name = require("addon.module_name")
"""
repl = r"""\g<space>const \g<identifier> = require(\g<path>)"""
return IMPORT_LEGACY_DEFAULT_RE.sub(repl, content)
IMPORT_DEFAULT = re.compile(r"""
^
(?P<space>\s*) # space and empty line
import\s+ # import
(?P<identifier>\w+)\s* # default variable name
from\s* # from
(?P<path>(?P<quote>["'`])([^"'`]+)(?P=quote)) # "file path" ("some/path")
""", re.MULTILINE | re.VERBOSE)
def convert_default_import(content):
"""
Transpile the default import call.
.. code-block:: javascript
// before
import something from "some/path"
// after
const something = require("some/path")[Symbol.for("default")]
"""
repl = r"""\g<space>const \g<identifier> = require(\g<path>)[Symbol.for("default")]"""
return IMPORT_DEFAULT.sub(repl, content)
RELATIVE_REQUIRE_RE = re.compile(r"""
require\((?P<quote>["'`])([^@"'`]+)(?P=quote)\) # require("some/path")
""", re.VERBOSE)
def convert_relative_require(url, content):
"""
Convert the relative path contained in a 'require()'
to the new path system (@module/path)
.. code-block:: javascript
// Relative path:
// before
require("./path")
// after
require("@module/path")
// Not a relative path:
// before
require("other_alias")
// after
require("other_alias")
"""
new_content = content
for quote, path in RELATIVE_REQUIRE_RE.findall(new_content):
if path.startswith(".") and "/" in path:
pattern = rf"require\({quote}{path}{quote}\)"
repl = f'require("{relative_path_to_module_path(url, path)}")'
new_content = re.sub(pattern, repl, new_content)
return new_content
IMPORT_STAR = re.compile(r"""
^(?P<space>\s*) # indentation
import\s+\*\s+as\s+ # import * as
(?P<identifier>\w+) # alias
\s*from\s* # from
(?P<path>[^;\n]+) # path
""", re.MULTILINE | re.VERBOSE)
def convert_star_import(content):
"""
Transpile import star.
.. code-block:: javascript
// before
import * as name from "some/path"
// after
const name = require("some/path")
"""
repl = r"\g<space>const \g<identifier> = require(\g<path>)"
return IMPORT_STAR.sub(repl, content)
IMPORT_UNNAMED_RELATIVE_RE = re.compile(r"""
^(?P<space>\s*) # indentation
import\s+ # import
(?P<path>[^;\n]+) # relative path
""", re.MULTILINE | re.VERBOSE)
def convert_unnamed_relative_import(content):
"""
Transpile relative "direct" imports. Direct meaning they are not store in a variable.
.. code-block:: javascript
// before
import "some/path"
// after
require("some/path")
"""
repl = r"require(\g<path>)"
return IMPORT_UNNAMED_RELATIVE_RE.sub(repl, content)
URL_INDEX_RE = re.compile(r"""
require\s* # require
\(\s* # (
(?P<path>(?P<quote>["'`])([^"'`]*/index/?)(?P=quote)) # path ended by /index or /index/
\s*\) # )
""", re.MULTILINE | re.VERBOSE)
def remove_index(content):
"""
Remove in the paths the /index.js.
We want to be able to import a module just through its directory name if it contains an index.js.
So we no longer need to specify the index.js in the paths.
"""
def repl(matchobj):
path = matchobj["path"]
new_path = path[: path.rfind("/index")] + path[0]
return f"require({new_path})"
return URL_INDEX_RE.sub(repl, content)
def relative_path_to_module_path(url, path_rel):
"""
Convert the relative path into a module path, which is more generic and fancy.
:param path_rel: a relative path to the current url.
:return: module path (@module/...)
"""
url_split = url.split("/")
path_rel_split = path_rel.split("/")
nb_back = len([v for v in path_rel_split if v == ".."]) + 1
result = "/".join(url_split[:-nb_back] + [v for v in path_rel_split if not v in ["..", "."]])
return url_to_module_path(result)
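# Illustrative examples (not part of the original module), assuming the file
# being transpiled lives at web/static/src/views/form.js:
def _example_relative_paths():
    url = "web/static/src/views/form.js"
    assert relative_path_to_module_path(url, "./common") == "@web/views/common"
    assert relative_path_to_module_path(url, "../utils") == "@web/utils"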
ODOO_MODULE_RE = re.compile(r"""
\s* # some starting space
\/(\*|\/).*\s* # // or /*
@odoo-module # @odoo-module
(\s+alias=(?P<alias>[\w.]+))? # alias=web.AbstractAction (optional)
(\s+default=(?P<default>False|false|0))? # default=False or false or 0 (optional)
""", re.VERBOSE)
def is_odoo_module(content):
"""
Detect if the file is a native odoo module.
We look for a comment containing @odoo-module.
:param content: source code
:return: is this a odoo module that need transpilation ?
"""
result = ODOO_MODULE_RE.match(content)
return bool(result)
def get_aliased_odoo_define_content(module_path, content):
"""
To allow smooth transition between the new system and the legacy one, we have the possibility to
define an alternative module name (an alias) that will act as a proxy between legacy require calls and
new modules.
Example:
If we have a require call somewhere in the odoo source base being:
> var AbstractAction = require("web.AbstractAction")
we have a problem once we have converted the module to ES6: its new name will be more like
"web/chrome/abstract_action". So the require would fail!
So we add a second small module, an alias, as such:
> odoo.define("web/chrome/abstract_action", async function(require) {
> return require('web.AbstractAction')[Symbol.for("default")];
> });
To generate this, change your comment on the top of the file.
.. code-block:: javascript
// before
/** @odoo-module */
// after
/** @odoo-module alias=web.AbstractAction */
Notice that often, the legacy system acted as if it did default imports. That's why we have the
"[Symbol.for("default")];" bit. If your use case does not need this default import, just do:
.. code-block:: javascript
// before
/** @odoo-module */
// after
/** @odoo-module alias=web.AbstractAction default=false */
:return: the alias content to append to the source code.
"""
matchobj = ODOO_MODULE_RE.match(content)
if matchobj:
alias = matchobj['alias']
if alias:
if matchobj['default']:
return """\nodoo.define(`%s`, async function(require) {
return require('%s');
});\n""" % (alias, module_path)
else:
return """\nodoo.define(`%s`, async function(require) {
return require('%s')[Symbol.for("default")];
});\n""" % (alias, module_path)
def convert_as(val):
parts = val.split(" as ")
return val if len(parts) < 2 else "%s: %s" % tuple(reversed(parts))
def remove_as(val):
parts = val.split(" as ")
return val if len(parts) < 2 else parts[0]
| 32.567939 | 21,332 |
2,199 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import dateutil.relativedelta as relativedelta
import functools
import re
from markupsafe import Markup
from werkzeug import urls
from odoo.tools import safe_eval
INLINE_TEMPLATE_REGEX = re.compile(r"\{\{(.+?)\}\}")
def relativedelta_proxy(*args, **kwargs):
# dateutil.relativedelta is an old-style class and cannot be directly
# instantiated within a jinja2 expression, so a lambda "proxy"
# is needed, apparently
return relativedelta.relativedelta(*args, **kwargs)
template_env_globals = {
'str': str,
'quote': urls.url_quote,
'urlencode': urls.url_encode,
'datetime': safe_eval.datetime,
'len': len,
'abs': abs,
'min': min,
'max': max,
'sum': sum,
'filter': filter,
'reduce': functools.reduce,
'map': map,
'relativedelta': relativedelta.relativedelta,
'round': round,
'hasattr': hasattr,
}
def parse_inline_template(text):
groups = []
current_literal_index = 0
for match in INLINE_TEMPLATE_REGEX.finditer(text):
literal = text[current_literal_index:match.start()]
expression = match.group(1)
groups.append((literal, expression))
current_literal_index = match.end()
# string past last regex match
literal = text[current_literal_index:]
if literal:
groups.append((literal, ''))
return groups
def convert_inline_template_to_qweb(template):
template_instructions = parse_inline_template(template or '')
preview_markup = []
for string, expression in template_instructions:
if expression:
preview_markup.append(Markup('{}<t t-out="{}"/>').format(string, expression))
else:
preview_markup.append(string)
return Markup('').join(preview_markup)
def render_inline_template(template_instructions, variables):
results = []
for string, expression in template_instructions:
results.append(string)
if expression:
result = safe_eval.safe_eval(expression, variables)
if result:
results.append(str(result))
return ''.join(results)
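# Illustrative usage (not part of the original module): the {{ ... }} expressions
# are parsed once, then evaluated against a variables dict through safe_eval.
# The record object below is a stand-in for whatever the template is rendered against.
def _example_inline_template():
    from types import SimpleNamespace
    instructions = parse_inline_template("Dear {{object.name}}, ref {{ref}}")
    # instructions == [('Dear ', 'object.name'), (', ref ', 'ref')]
    return render_inline_template(
        instructions, {"object": SimpleNamespace(name="Azure"), "ref": "SO042"})
    # -> 'Dear Azure, ref SO042'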
| 29.32 | 2,199 |
19,969 |
py
|
PYTHON
|
15.0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
from __future__ import print_function
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 3, 0)
__version__ = '.'.join(str(v) for v in __version_info__)
import sys
import os
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if sys.platform == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
path = os.path.join(path, appauthor, appname)
elif sys.platform == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
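# Illustrative usage (not part of the original module): the result is platform dependent.
def _example_user_data_dir():
    # Linux (default XDG settings): ~/.local/share/SuperApp/1.0 (expanded)
    # Windows (not roaming): C:\Users\<user>\AppData\Local\Acme\SuperApp\1.0
    return user_data_dir("SuperApp", appauthor="Acme", version="1.0")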
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical user data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if sys.platform == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
path = os.path.join(path, appauthor, appname)
elif sys.platform == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [ os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep) ]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [ os.sep.join([x, appname]) for x in pathlist ]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.config/<AppName>".
"""
if sys.platform in [ "win32", "darwin" ]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical user data directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if sys.platform in [ "win32", "darwin" ]:
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [ os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep) ]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [ os.sep.join([x, appname]) for x in pathlist ]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if sys.platform == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
path = os.path.join(path, appauthor, appname)
if opinion:
path = os.path.join(path, "Cache")
elif sys.platform == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
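# A minimal usage sketch for the helpers above; the paths shown assume a Linux
# user named "alice" with none of the XDG_* environment variables set, so the
# results on other platforms or configurations will differ.
#
#     >>> user_config_dir("SuperApp")
#     '/home/alice/.config/SuperApp'
#     >>> user_cache_dir("SuperApp", version="1.0")
#     '/home/alice/.cache/SuperApp/1.0'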
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only required and used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
    Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if sys.platform == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif sys.platform == "win32":
        path = user_data_dir(appname, appauthor, version)
        version = False
if opinion:
path = os.path.join(path, "Logs")
else:
        path = user_cache_dir(appname, appauthor, version)
        version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname, appauthor=None, version=None,
roaming=False, multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
import winreg as _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = str(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
if sys.platform == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
import ctypes
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir", "site_data_dir",
"user_config_dir", "site_config_dir",
"user_cache_dir", "user_log_dir")
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
| 42.394904 | 19,968 |
13,988 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ast
import pathlib
import os
import re
import shutil
import odoo
from odoo.tools.config import config
VERSION = 1
DEFAULT_EXCLUDE = [
"__manifest__.py",
"__openerp__.py",
"tests/**/*",
"static/lib/**/*",
"static/tests/**/*",
"migrations/**/*",
"upgrades/**/*",
]
STANDARD_MODULES = ['web', 'web_enterprise', 'theme_common', 'base']
MAX_FILE_SIZE = 25 * 2**20 # 25 MB
MAX_LINE_SIZE = 100000
VALID_EXTENSION = ['.py', '.js', '.xml', '.css', '.scss']
class Cloc(object):
def __init__(self):
self.modules = {}
self.code = {}
self.total = {}
self.errors = {}
self.excluded = {}
self.max_width = 70
#------------------------------------------------------
# Parse
#------------------------------------------------------
def parse_xml(self, s):
s = s.strip() + "\n"
        # Unbalanced XML comments inside a CDATA section are not supported, and
        # XML comments inside a CDATA section will (wrongly) be counted as comments
total = s.count("\n")
s = re.sub("(<!--.*?-->)", "", s, flags=re.DOTALL)
s = re.sub(r"\s*\n\s*", r"\n", s).lstrip()
return s.count("\n"), total
def parse_py(self, s):
try:
s = s.strip() + "\n"
total = s.count("\n")
lines = set()
for i in ast.walk(ast.parse(s)):
# we only count 1 for a long string or a docstring
if hasattr(i, 'lineno'):
lines.add(i.lineno)
return len(lines), total
except Exception:
return (-1, "Syntax Error")
def parse_c_like(self, s, regex):
# Based on https://stackoverflow.com/questions/241327
s = s.strip() + "\n"
total = s.count("\n")
        # To avoid using too much memory, we don't try to count files
        # with very long lines, which are usually minified files
if max(len(l) for l in s.split('\n')) > MAX_LINE_SIZE:
return -1, "Max line size exceeded"
def replacer(match):
s = match.group(0)
return " " if s.startswith('/') else s
comments_re = re.compile(regex, re.DOTALL | re.MULTILINE)
s = re.sub(comments_re, replacer, s)
s = re.sub(r"\s*\n\s*", r"\n", s).lstrip()
return s.count("\n"), total
def parse_js(self, s):
return self.parse_c_like(s, r'//.*?$|(?<!\\)/\*.*?\*/|\'(\\.|[^\\\'])*\'|"(\\.|[^\\"])*"')
def parse_scss(self, s):
return self.parse_c_like(s, r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"')
def parse_css(self, s):
return self.parse_c_like(s, r'/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"')
def parse(self, s, ext):
if ext == '.py':
return self.parse_py(s)
elif ext == '.js':
return self.parse_js(s)
elif ext == '.xml':
return self.parse_xml(s)
elif ext == '.css':
return self.parse_css(s)
elif ext == '.scss':
return self.parse_scss(s)
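    # Illustrative sketch of what the parsers above return: a (code, total) pair
    # where `code` counts effective lines and `total` counts all lines, with
    # comments and blank lines excluded from `code`. The snippet is arbitrary.
    #
    #     >>> Cloc().parse_py("# a comment\n\nx = 1\n")
    #     (1, 3)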
#------------------------------------------------------
# Enumeration
#------------------------------------------------------
def book(self, module, item='', count=(0, 0), exclude=False):
if count[0] == -1:
self.errors.setdefault(module, {})
self.errors[module][item] = count[1]
elif exclude and item:
self.excluded.setdefault(module, {})
self.excluded[module][item] = count
else:
self.modules.setdefault(module, {})
if item:
self.modules[module][item] = count
self.code[module] = self.code.get(module, 0) + count[0]
self.total[module] = self.total.get(module, 0) + count[1]
self.max_width = max(self.max_width, len(module), len(item) + 4)
def count_path(self, path, exclude=None):
path = path.rstrip('/')
exclude_list = []
for i in odoo.modules.module.MANIFEST_NAMES:
manifest_path = os.path.join(path, i)
try:
with open(manifest_path, 'rb') as manifest:
exclude_list.extend(DEFAULT_EXCLUDE)
d = ast.literal_eval(manifest.read().decode('latin1'))
for j in ['cloc_exclude', 'demo', 'demo_xml']:
exclude_list.extend(d.get(j, []))
break
except Exception:
pass
if not exclude:
exclude = set()
for i in filter(None, exclude_list):
exclude.update(str(p) for p in pathlib.Path(path).glob(i))
module_name = os.path.basename(path)
self.book(module_name)
for root, dirs, files in os.walk(path):
for file_name in files:
file_path = os.path.join(root, file_name)
if file_path in exclude:
continue
ext = os.path.splitext(file_path)[1].lower()
if ext not in VALID_EXTENSION:
continue
if os.path.getsize(file_path) > MAX_FILE_SIZE:
self.book(module_name, file_path, (-1, "Max file size exceeded"))
continue
with open(file_path, 'rb') as f:
                    # Decode using latin1 to avoid errors that may be raised by decoding with utf8.
                    # Characters not correctly decoded in latin1 have no impact on how many lines are counted.
content = f.read().decode('latin1')
self.book(module_name, file_path, self.parse(content, ext))
def count_modules(self, env):
# Exclude standard addons paths
exclude_heuristic = [odoo.modules.get_module_path(m, display_warning=False) for m in STANDARD_MODULES]
exclude_path = set([os.path.dirname(os.path.realpath(m)) for m in exclude_heuristic if m])
domain = [('state', '=', 'installed')]
# if base_import_module is present
if env['ir.module.module']._fields.get('imported'):
domain.append(('imported', '=', False))
module_list = env['ir.module.module'].search(domain).mapped('name')
for module_name in module_list:
module_path = os.path.realpath(odoo.modules.get_module_path(module_name))
if module_path:
if any(module_path.startswith(i) for i in exclude_path):
continue
self.count_path(module_path)
def count_customization(self, env):
imported_module_sa = ""
if env['ir.module.module']._fields.get('imported'):
imported_module_sa = "OR (m.imported = TRUE AND m.state = 'installed')"
query = """
SELECT s.id, min(m.name), array_agg(d.module)
FROM ir_act_server AS s
LEFT JOIN ir_model_data AS d
ON (d.res_id = s.id AND d.model = 'ir.actions.server')
LEFT JOIN ir_module_module AS m
ON m.name = d.module
WHERE s.state = 'code' AND (m.name IS null {})
GROUP BY s.id
""".format(imported_module_sa)
env.cr.execute(query)
data = {r[0]: (r[1], r[2]) for r in env.cr.fetchall()}
for a in env['ir.actions.server'].browse(data.keys()):
self.book(
data[a.id][0] or "odoo/studio",
"ir.actions.server/%s: %s" % (a.id, a.name),
self.parse_py(a.code),
'__cloc_exclude__' in data[a.id][1]
)
imported_module_field = ("'odoo/studio'", "")
if env['ir.module.module']._fields.get('imported'):
imported_module_field = ("min(m.name)", "AND m.imported = TRUE AND m.state = 'installed'")
        # We always want to count manual computed fields unless they are generated by Studio.
        # The module should be odoo/studio unless the field comes from an imported module install,
        # because manual fields get an external id from the original module of the model.
query = r"""
SELECT f.id, f.name, {}, array_agg(d.module)
FROM ir_model_fields AS f
LEFT JOIN ir_model_data AS d ON (d.res_id = f.id AND d.model = 'ir.model.fields')
LEFT JOIN ir_module_module AS m ON m.name = d.module {}
WHERE f.compute IS NOT null AND f.state = 'manual'
GROUP BY f.id, f.name
""".format(*imported_module_field)
env.cr.execute(query)
        # Do not count fields generated by Studio
all_data = env.cr.fetchall()
data = {r[0]: (r[2], r[3]) for r in all_data if not ("studio_customization" in r[3] and not r[1].startswith('x_studio'))}
for f in env['ir.model.fields'].browse(data.keys()):
self.book(
data[f.id][0] or "odoo/studio",
"ir.model.fields/%s: %s" % (f.id, f.name),
self.parse_py(f.compute),
'__cloc_exclude__' in data[f.id][1]
)
if not env['ir.module.module']._fields.get('imported'):
return
# Count qweb view only from imported module and not studio
query = """
SELECT view.id, min(mod.name), array_agg(data.module)
FROM ir_ui_view view
INNER JOIN ir_model_data data ON view.id = data.res_id AND data.model = 'ir.ui.view'
LEFT JOIN ir_module_module mod ON mod.name = data.module AND mod.imported = True
WHERE view.type = 'qweb' AND data.module != 'studio_customization'
GROUP BY view.id
HAVING count(mod.name) > 0
"""
env.cr.execute(query)
custom_views = {r[0]: (r[1], r[2]) for r in env.cr.fetchall()}
for view in env['ir.ui.view'].browse(custom_views.keys()):
module_name = custom_views[view.id][0]
self.book(
module_name,
"/%s/views/%s.xml" % (module_name, view.name),
self.parse_xml(view.arch_base),
'__cloc_exclude__' in custom_views[view.id][1]
)
# Count js, xml, css/scss file from imported module
query = r"""
SELECT attach.id, min(mod.name), array_agg(data.module)
FROM ir_attachment attach
INNER JOIN ir_model_data data ON attach.id = data.res_id AND data.model = 'ir.attachment'
LEFT JOIN ir_module_module mod ON mod.name = data.module AND mod.imported = True
WHERE attach.name ~ '.*\.(js|xml|css|scss)$'
GROUP BY attach.id
HAVING count(mod.name) > 0
"""
env.cr.execute(query)
uploaded_file = {r[0]: (r[1], r[2]) for r in env.cr.fetchall()}
for attach in env['ir.attachment'].browse(uploaded_file.keys()):
module_name = uploaded_file[attach.id][0]
ext = os.path.splitext(attach.url)[1].lower()
if ext not in VALID_EXTENSION:
continue
if len(attach.datas) > MAX_FILE_SIZE:
self.book(module_name, attach.url, (-1, "Max file size exceeded"))
continue
            # Decode using latin1 to avoid errors that may be raised by decoding with utf8.
            # Characters not correctly decoded in latin1 have no impact on how many lines are counted.
content = attach.raw.decode('latin1')
self.book(
module_name,
attach.url,
self.parse(content, ext),
'__cloc_exclude__' in uploaded_file[attach.id][1],
)
def count_env(self, env):
self.count_modules(env)
self.count_customization(env)
def count_database(self, database):
registry = odoo.registry(config['db_name'])
with registry.cursor() as cr:
uid = odoo.SUPERUSER_ID
env = odoo.api.Environment(cr, uid, {})
self.count_env(env)
#------------------------------------------------------
# Report
#------------------------------------------------------
# pylint: disable=W0141
def report(self, verbose=False, width=None):
# Prepare format
if not width:
width = min(self.max_width, shutil.get_terminal_size()[0] - 24)
hr = "-" * (width + 24) + "\n"
fmt = '{k:%d}{lines:>8}{other:>8}{code:>8}\n' % (width,)
# Render
s = fmt.format(k="Odoo cloc", lines="Line", other="Other", code="Code")
s += hr
for m in sorted(self.modules):
s += fmt.format(k=m, lines=self.total[m], other=self.total[m]-self.code[m], code=self.code[m])
if verbose:
for i in sorted(self.modules[m], key=lambda i: self.modules[m][i][0], reverse=True):
code, total = self.modules[m][i]
s += fmt.format(k=' ' + i, lines=total, other=total - code, code=code)
s += hr
total = sum(self.total.values())
code = sum(self.code.values())
s += fmt.format(k='', lines=total, other=total - code, code=code)
print(s)
if self.excluded and verbose:
ex = fmt.format(k="Excluded", lines="Line", other="Other", code="Code")
ex += hr
for m in sorted(self.excluded):
for i in sorted(self.excluded[m], key=lambda i: self.excluded[m][i][0], reverse=True):
code, total = self.excluded[m][i]
ex += fmt.format(k=' ' + i, lines=total, other=total - code, code=code)
ex += hr
print(ex)
if self.errors:
e = "\nErrors\n\n"
for m in sorted(self.errors):
e += "{}\n".format(m)
for i in sorted(self.errors[m]):
e += fmt.format(k=' ' + i, lines=self.errors[m][i], other='', code='')
print(e)
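# Minimal usage sketch (the paths are hypothetical): count a single addon
# directory and print the per-file breakdown, or count everything installed in
# the configured database.
#
#     cloc = Cloc()
#     cloc.count_path('/path/to/addons/my_module')
#     cloc.count_database(config['db_name'])
#     cloc.report(verbose=True)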
| 41.384615 | 13,988 |
12,274 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" Helper functions for reports testing.
Please /do not/ import this file by default, but only explicitly call it
through the code of python tests.
"""
import logging
import os
import tempfile
from subprocess import Popen, PIPE
from .. import api
from . import ustr, config
from .safe_eval import safe_eval
_logger = logging.getLogger(__name__)
_test_logger = logging.getLogger('odoo.tests')
def try_report(cr, uid, rname, ids, data=None, context=None, our_module=None, report_type=None):
""" Try to render a report <rname> with contents of ids
This function should also check for common pitfalls of reports.
"""
if context is None:
context = {}
_test_logger.info(" - Trying %s.create(%r)", rname, ids)
env = api.Environment(cr, uid, context)
report_id = env['ir.actions.report'].search([('report_name', '=', rname)], limit=1)
if not report_id:
raise Exception("Required report does not exist: %s" % rname)
res_data, res_format = report_id._render(ids, data=data)
if not res_data:
raise ValueError("Report %s produced an empty result!" % rname)
_logger.debug("Have a %s report for %s, will examine it", res_format, rname)
if res_format == 'pdf':
if res_data[:5] != b'%PDF-':
raise ValueError("Report %s produced a non-pdf header, %r" % (rname, res_data[:10]))
res_text = False
try:
fd, rfname = tempfile.mkstemp(suffix=res_format)
os.write(fd, res_data)
os.close(fd)
proc = Popen(['pdftotext', '-enc', 'UTF-8', '-nopgbrk', rfname, '-'], shell=False, stdout=PIPE)
stdout, stderr = proc.communicate()
res_text = ustr(stdout)
os.unlink(rfname)
except Exception:
_logger.debug("Unable to parse PDF report: install pdftotext to perform automated tests.")
if res_text is not False:
for line in res_text.split('\n'):
if ('[[' in line) or ('[ [' in line):
_logger.error("Report %s may have bad expression near: \"%s\".", rname, line[80:])
# TODO more checks, what else can be a sign of a faulty report?
elif res_format == 'html':
pass
else:
_logger.warning("Report %s produced a \"%s\" chunk, cannot examine it", rname, res_format)
return False
_test_logger.info(" + Report %s produced correctly.", rname)
return True
def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
wiz_data=None, wiz_buttons=None,
context=None, our_module=None):
"""Take an ir.actions.act_window and follow it until a report is produced
:param action_id: the integer id of an action, or a reference to xml id
of the act_window (can search [our_module.]+xml_id
:param active_model, active_ids: call the action as if it had been launched
from that model+ids (tree/form view action)
:param wiz_data: a dictionary of values to use in the wizard, if needed.
They will override (or complete) the default values of the
wizard form.
:param wiz_buttons: a list of button names, or button icon strings, which
should be preferred to press during the wizard.
Eg. 'OK' or 'fa-print'
:param our_module: the name of the calling module (string), like 'account'
"""
if not our_module and isinstance(action_id, str):
if '.' in action_id:
our_module = action_id.split('.', 1)[0]
context = dict(context or {})
# TODO context fill-up
env = api.Environment(cr, uid, context)
def log_test(msg, *args):
_test_logger.info(" - " + msg, *args)
datas = {}
if active_model:
datas['model'] = active_model
if active_ids:
datas['ids'] = active_ids
if not wiz_buttons:
wiz_buttons = []
if isinstance(action_id, str):
if '.' in action_id:
_, act_xmlid = action_id.split('.', 1)
else:
if not our_module:
                raise ValueError('You cannot specify action_id "%s" without a module name' % action_id)
act_xmlid = action_id
action_id = '%s.%s' % (our_module, action_id)
action = env.ref(action_id)
act_model, act_id = action._name, action.id
else:
assert isinstance(action_id, int)
act_model = 'ir.actions.act_window' # assume that
act_id = action_id
act_xmlid = '<%s>' % act_id
def _exec_action(action, datas, env):
# taken from client/modules/action/main.py:84 _exec_action()
if isinstance(action, bool) or 'type' not in action:
return
# Updating the context : Adding the context of action in order to use it on Views called from buttons
context = dict(env.context)
if datas.get('id',False):
            context.update({
                'active_id': datas.get('id', False),
                'active_ids': datas.get('ids', []),
                'active_model': datas.get('model', False),
            })
context1 = action.get('context', {})
if isinstance(context1, str):
context1 = safe_eval(context1, dict(context))
context.update(context1)
env = env(context=context)
if action['type'] in ['ir.actions.act_window', 'ir.actions.submenu']:
for key in ('res_id', 'res_model', 'view_mode',
'limit', 'search_view', 'search_view_id'):
datas[key] = action.get(key, datas.get(key, None))
view_id = False
view_type = None
if action.get('views', []):
if isinstance(action['views'],list):
view_id, view_type = action['views'][0]
datas['view_mode']= view_type
else:
if action.get('view_id', False):
view_id = action['view_id'][0]
elif action.get('view_id', False):
view_id = action['view_id'][0]
if view_type is None:
if view_id:
view_type = env['ir.ui.view'].browse(view_id).type
else:
view_type = action['view_mode'].split(',')[0]
assert datas['res_model'], "Cannot use the view without a model"
# Here, we have a view that we need to emulate
log_test("will emulate a %s view: %s#%s",
view_type, datas['res_model'], view_id or '?')
view_res = env[datas['res_model']].fields_view_get(view_id, view_type=view_type)
assert view_res and view_res.get('arch'), "Did not return any arch for the view"
view_data = {}
if view_res.get('fields'):
view_data = env[datas['res_model']].default_get(list(view_res['fields']))
if datas.get('form'):
view_data.update(datas.get('form'))
if wiz_data:
view_data.update(wiz_data)
_logger.debug("View data is: %r", view_data)
for fk, field in view_res.get('fields',{}).items():
# Default fields returns list of int, while at create()
# we need to send a [(6,0,[int,..])]
if field['type'] in ('one2many', 'many2many') \
and view_data.get(fk, False) \
and isinstance(view_data[fk], list) \
and not isinstance(view_data[fk][0], tuple) :
view_data[fk] = [(6, 0, view_data[fk])]
action_name = action.get('name')
try:
from xml.dom import minidom
cancel_found = False
buttons = []
dom_doc = minidom.parseString(view_res['arch'])
if not action_name:
action_name = dom_doc.documentElement.getAttribute('name')
for button in dom_doc.getElementsByTagName('button'):
button_weight = 0
if button.getAttribute('special') == 'cancel':
cancel_found = True
continue
if button.getAttribute('icon') == 'fa-times-circle':
cancel_found = True
continue
if button.getAttribute('default_focus') == '1':
button_weight += 20
if button.getAttribute('string') in wiz_buttons:
button_weight += 30
elif button.getAttribute('icon') in wiz_buttons:
button_weight += 10
string = button.getAttribute('string') or '?%s' % len(buttons)
buttons.append({
'name': button.getAttribute('name'),
'string': string,
'type': button.getAttribute('type'),
'weight': button_weight,
})
except Exception as e:
_logger.warning("Cannot resolve the view arch and locate the buttons!", exc_info=True)
raise AssertionError(e.args[0])
if not datas['res_id']:
# it is probably an orm_memory object, we need to create
# an instance
datas['res_id'] = env[datas['res_model']].create(view_data).id
if not buttons:
raise AssertionError("view form doesn't have any buttons to press!")
buttons.sort(key=lambda b: b['weight'])
_logger.debug('Buttons are: %s', ', '.join([ '%s: %d' % (b['string'], b['weight']) for b in buttons]))
res = None
while buttons and not res:
b = buttons.pop()
log_test("in the \"%s\" form, I will press the \"%s\" button.", action_name, b['string'])
if not b['type']:
log_test("the \"%s\" button has no type, cannot use it", b['string'])
continue
if b['type'] == 'object':
#there we are! press the button!
rec = env[datas['res_model']].browse(datas['res_id'])
func = getattr(rec, b['name'], None)
if not func:
_logger.error("The %s model doesn't have a %s attribute!", datas['res_model'], b['name'])
continue
res = func()
break
else:
_logger.warning("in the \"%s\" form, the \"%s\" button has unknown type %s",
action_name, b['string'], b['type'])
return res
elif action['type']=='ir.actions.report':
if 'window' in datas:
del datas['window']
if not datas:
datas = action.get('datas')
if not datas:
datas = action.get('data')
datas = datas.copy()
ids = datas.get('ids')
if 'ids' in datas:
del datas['ids']
res = try_report(cr, uid, action['report_name'], ids, datas, context, our_module=our_module)
return res
else:
raise Exception("Cannot handle action of type %s" % act_model)
log_test("will be using %s action %s #%d", act_model, act_xmlid, act_id)
action = env[act_model].browse(act_id).read()[0]
assert action, "Could not read action %s[%s]" % (act_model, act_id)
loop = 0
while action:
loop += 1
# This part tries to emulate the loop of the Gtk client
if loop > 100:
_logger.info("Passed %d loops, giving up", loop)
raise Exception("Too many loops at action")
log_test("it is an %s action at loop #%d", action.get('type', 'unknown'), loop)
result = _exec_action(action, datas, env)
if not isinstance(result, dict):
break
datas = result.get('datas', {})
if datas:
del result['datas']
action = result
return True
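# Minimal usage sketch: from a python test, follow a report action and let the
# helper press the wizard buttons. The xml id, model and ids below are
# hypothetical placeholders.
#
#     try_report_action(cr, uid, 'account.action_report_invoice_demo',
#                       active_model='res.partner', active_ids=[1],
#                       wiz_buttons=['Print'], our_module='account')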
| 42.178694 | 12,274 |
1,336 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
import collections
import threading
from .func import synchronized
__all__ = ['LRU']
class LRU(object):
"""
Implementation of a length-limited O(1) LRU map.
Original Copyright 2003 Josiah Carlson, later rebuilt on OrderedDict.
"""
def __init__(self, count, pairs=()):
self._lock = threading.RLock()
self.count = max(count, 1)
self.d = collections.OrderedDict()
for key, value in pairs:
self[key] = value
@synchronized()
def __contains__(self, obj):
return obj in self.d
def get(self, obj, val=None):
try:
return self[obj]
except KeyError:
return val
@synchronized()
def __getitem__(self, obj):
a = self.d[obj]
self.d.move_to_end(obj, last=False)
return a
@synchronized()
def __setitem__(self, obj, val):
self.d[obj] = val
self.d.move_to_end(obj, last=False)
while len(self.d) > self.count:
self.d.popitem(last=True)
@synchronized()
def __delitem__(self, obj):
del self.d[obj]
@synchronized()
def __len__(self):
return len(self.d)
@synchronized()
def pop(self,key):
return self.d.pop(key)
@synchronized()
def clear(self):
self.d.clear()
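# Usage sketch: a two-slot cache where reading a key refreshes it and the least
# recently used entry is evicted first.
#
#     >>> cache = LRU(2)
#     >>> cache['a'] = 1
#     >>> cache['b'] = 2
#     >>> cache['a']          # touching 'a' makes it the most recent entry
#     1
#     >>> cache['c'] = 3      # evicts 'b', the least recently used key
#     >>> 'b' in cache
#     False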
| 22.644068 | 1,336 |
1,116 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
#pylint: disable=deprecated-module
import csv
import codecs
import io
_reader = codecs.getreader('utf-8')
_writer = codecs.getwriter('utf-8')
def csv_reader(stream, **params):
assert not isinstance(stream, io.TextIOBase),\
"For cross-compatibility purposes, csv_reader takes a bytes stream"
return csv.reader(_reader(stream), **params)
def csv_writer(stream, **params):
assert not isinstance(stream, io.TextIOBase), \
"For cross-compatibility purposes, csv_writer takes a bytes stream"
return csv.writer(_writer(stream), **params)
def to_text(source):
""" Generates a text value (an instance of text_type) from an arbitrary
source.
* False and None are converted to empty strings
* text is passed through
* bytes are decoded as UTF-8
* rest is textified via the current version's relevant data model method
"""
if source is None or source is False:
return u''
if isinstance(source, bytes):
return source.decode('utf-8')
if isinstance(source, str):
return source
return str(source)
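# Small self-check sketch (only runs when this file is executed directly): both
# helpers expect byte streams, so in-memory data goes through io.BytesIO.
if __name__ == '__main__':
    buf = io.BytesIO()
    csv_writer(buf, delimiter=',').writerow(['name', 'qty'])
    buf.seek(0)
    assert next(csv_reader(buf, delimiter=',')) == ['name', 'qty']
    assert to_text(b'caf\xc3\xa9') == 'café'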
| 27.219512 | 1,116 |
6,594 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
Some functions related to the os and os.path module
"""
import logging
import os
import re
import tempfile
import zipfile
from contextlib import contextmanager
from os.path import join as opj
_logger = logging.getLogger(__name__)
WINDOWS_RESERVED = re.compile(r'''
^
# forbidden stems: reserved keywords
    (?:CON|PRN|AUX|NUL|COM[1-9]|LPT[1-9])
# even with an extension this is recommended against
    (?:\..*)?
$
''', flags=re.IGNORECASE | re.VERBOSE)
def clean_filename(name, replacement=''):
""" Strips or replaces possibly problematic or annoying characters our of
the input string, in order to make it a valid filename in most operating
systems (including dropping reserved Windows filenames).
If this results in an empty string, results in "Untitled" (localized).
Allows:
* any alphanumeric character (unicode)
* underscore (_) as that's innocuous
* dot (.) except in leading position to avoid creating dotfiles
* dash (-) except in leading position to avoid annoyance / confusion with
command options
* brackets ([ and ]), while they correspond to shell *character class*
they're a common way to mark / tag files especially on windows
* parenthesis ("(" and ")"), a more natural though less common version of
the former
* space (" ")
:param str name: file name to clean up
:param str replacement:
replacement string to use for sequences of problematic input, by default
an empty string to remove them entirely, each contiguous sequence of
problems is replaced by a single replacement
:rtype: str
"""
if WINDOWS_RESERVED.match(name):
return "Untitled"
return re.sub(r'[^\w_.()\[\] -]+', replacement, name).lstrip('.-') or "Untitled"
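# Usage sketch: runs of disallowed characters collapse into the replacement
# string (empty by default) and Windows reserved names fall back to "Untitled".
#
#     >>> clean_filename('quarterly report: Q1/Q2 *final*')
#     'quarterly report Q1Q2 final'
#     >>> clean_filename('COM1')
#     'Untitled'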
def listdir(dir, recursive=False):
"""Allow to recursively get the file listing following symlinks, returns
paths relative to the provided `dir` except completely broken if the symlink
it follows leaves `dir`...
"""
if not recursive:
_logger.getChild('listdir').warning("Deprecated: just call os.listdir...")
dir = os.path.normpath(dir)
if not recursive:
return os.listdir(dir)
    res = []
    for root, _, files in os.walk(dir, followlinks=True):
        r = os.path.relpath(root, dir)
        # FIXME: what should happen if root is outside dir?
        res.extend(opj(r, f) for f in files)
    return res
def walksymlinks(top, topdown=True, onerror=None):
_logger.getChild('walksymlinks').warning("Deprecated: use os.walk(followlinks=True) instead")
return os.walk(top, topdown=topdown, onerror=onerror, followlinks=True)
@contextmanager
def tempdir():
_logger.getChild('tempdir').warning("Deprecated: use tempfile.TemporaryDirectory")
with tempfile.TemporaryDirectory() as d:
yield d
def zip_dir(path, stream, include_dir=True, fnct_sort=None): # TODO add ignore list
"""
: param fnct_sort : Function to be passed to "key" parameter of built-in
python sorted() to provide flexibility of sorting files
inside ZIP archive according to specific requirements.
"""
path = os.path.normpath(path)
len_prefix = len(os.path.dirname(path)) if include_dir else len(path)
if len_prefix:
len_prefix += 1
with zipfile.ZipFile(stream, 'w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zipf:
for dirpath, dirnames, filenames in os.walk(path):
filenames = sorted(filenames, key=fnct_sort)
for fname in filenames:
bname, ext = os.path.splitext(fname)
ext = ext or bname
if ext not in ['.pyc', '.pyo', '.swp', '.DS_Store']:
path = os.path.normpath(os.path.join(dirpath, fname))
if os.path.isfile(path):
zipf.write(path, path[len_prefix:])
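# Usage sketch (the path is hypothetical): archive a directory into an
# in-memory buffer, keeping the top-level folder name inside the archive and
# sorting entries case-insensitively.
#
#     from io import BytesIO
#     buf = BytesIO()
#     zip_dir('/path/to/my_module', buf, include_dir=True,
#             fnct_sort=lambda name: name.lower())
#     archive_bytes = buf.getvalue()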
if os.name != 'nt':
getppid = os.getppid
is_running_as_nt_service = lambda: False
else:
import ctypes
import win32service as ws
import win32serviceutil as wsu
# based on http://mail.python.org/pipermail/python-win32/2007-June/006174.html
_TH32CS_SNAPPROCESS = 0x00000002
class _PROCESSENTRY32(ctypes.Structure):
_fields_ = [("dwSize", ctypes.c_ulong),
("cntUsage", ctypes.c_ulong),
("th32ProcessID", ctypes.c_ulong),
("th32DefaultHeapID", ctypes.c_ulong),
("th32ModuleID", ctypes.c_ulong),
("cntThreads", ctypes.c_ulong),
("th32ParentProcessID", ctypes.c_ulong),
("pcPriClassBase", ctypes.c_ulong),
("dwFlags", ctypes.c_ulong),
("szExeFile", ctypes.c_char * 260)]
def getppid():
CreateToolhelp32Snapshot = ctypes.windll.kernel32.CreateToolhelp32Snapshot
Process32First = ctypes.windll.kernel32.Process32First
Process32Next = ctypes.windll.kernel32.Process32Next
CloseHandle = ctypes.windll.kernel32.CloseHandle
hProcessSnap = CreateToolhelp32Snapshot(_TH32CS_SNAPPROCESS, 0)
current_pid = os.getpid()
try:
pe32 = _PROCESSENTRY32()
pe32.dwSize = ctypes.sizeof(_PROCESSENTRY32)
if not Process32First(hProcessSnap, ctypes.byref(pe32)):
raise OSError('Failed getting first process.')
while True:
if pe32.th32ProcessID == current_pid:
return pe32.th32ParentProcessID
if not Process32Next(hProcessSnap, ctypes.byref(pe32)):
return None
finally:
CloseHandle(hProcessSnap)
from contextlib import contextmanager
from odoo.release import nt_service_name
def is_running_as_nt_service():
@contextmanager
def close_srv(srv):
try:
yield srv
finally:
ws.CloseServiceHandle(srv)
try:
with close_srv(ws.OpenSCManager(None, None, ws.SC_MANAGER_ALL_ACCESS)) as hscm:
with close_srv(wsu.SmartOpenService(hscm, nt_service_name, ws.SERVICE_ALL_ACCESS)) as hs:
info = ws.QueryServiceStatusEx(hs)
return info['ProcessId'] == getppid()
except Exception:
return False
if __name__ == '__main__':
from pprint import pprint as pp
pp(listdir('../report', True))
| 38.561404 | 6,594 |
5,973 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
"""Utilities for generating, parsing and checking XML/XSD files on top of the lxml.etree module."""
import base64
from io import BytesIO
from lxml import etree
from odoo.exceptions import UserError
class odoo_resolver(etree.Resolver):
"""Odoo specific file resolver that can be added to the XML Parser.
It will search filenames in the ir.attachments
"""
def __init__(self, env):
super().__init__()
self.env = env
def resolve(self, url, id, context):
"""Search url in ``ir.attachment`` and return the resolved content."""
attachment = self.env['ir.attachment'].search([('name', '=', url)])
if attachment:
return self.resolve_string(base64.b64decode(attachment.datas), context)
def _check_with_xsd(tree_or_str, stream, env=None):
"""Check an XML against an XSD schema.
This will raise a UserError if the XML file is not valid according to the
XSD file.
:param tree_or_str (etree, str): representation of the tree to be checked
:param stream (io.IOBase, str): the byte stream used to build the XSD schema.
If env is given, it can also be the name of an attachment in the filestore
:param env (odoo.api.Environment): If it is given, it enables resolving the
imports of the schema in the filestore with ir.attachments.
"""
if not isinstance(tree_or_str, etree._Element):
tree_or_str = etree.fromstring(tree_or_str)
parser = etree.XMLParser()
if env:
parser.resolvers.add(odoo_resolver(env))
if isinstance(stream, str) and stream.endswith('.xsd'):
attachment = env['ir.attachment'].search([('name', '=', stream)])
if not attachment:
raise FileNotFoundError()
stream = BytesIO(base64.b64decode(attachment.datas))
xsd_schema = etree.XMLSchema(etree.parse(stream, parser=parser))
try:
xsd_schema.assertValid(tree_or_str)
except etree.DocumentInvalid as xml_errors:
raise UserError('\n'.join(str(e) for e in xml_errors.error_log))
def create_xml_node_chain(first_parent_node, nodes_list, last_node_value=None):
"""Generate a hierarchical chain of nodes.
Each new node being the child of the previous one based on the tags contained
in `nodes_list`, under the given node `first_parent_node`.
:param first_parent_node (etree._Element): parent of the created tree/chain
:param nodes_list (iterable<str>): tag names to be created
:param last_node_value (str): if specified, set the last node's text to this value
:returns (list<etree._Element>): the list of created nodes
"""
res = []
current_node = first_parent_node
for tag in nodes_list:
current_node = etree.SubElement(current_node, tag)
res.append(current_node)
if last_node_value is not None:
current_node.text = last_node_value
return res
def create_xml_node(parent_node, node_name, node_value=None):
"""Create a new node.
:param parent_node (etree._Element): parent of the created node
:param node_name (str): name of the created node
:param node_value (str): value of the created node (optional)
:returns (etree._Element):
"""
return create_xml_node_chain(parent_node, [node_name], node_value)[0]
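# Usage sketch: build <Invoice><Header><Ref>INV-1</Ref></Header></Invoice>
# under a fresh root; the tag names are arbitrary placeholders.
#
#     root = etree.Element('Invoice')
#     header, ref = create_xml_node_chain(root, ['Header', 'Ref'], 'INV-1')
#     ref.text == 'INV-1'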
def cleanup_xml_node(xml_node_or_string, remove_blank_text=True, remove_blank_nodes=True, indent_level=0, indent_space=" "):
"""Clean up the sub-tree of the provided XML node.
If the provided XML node is of type:
- etree._Element, it is modified in-place.
- string/bytes, it is first parsed into an etree._Element
:param xml_node_or_string (etree._Element, str): XML node (or its string/bytes representation)
:param remove_blank_text (bool): if True, removes whitespace-only text from nodes
:param remove_blank_nodes (bool): if True, removes leaf nodes with no text (iterative, depth-first, done after remove_blank_text)
:param indent_level (int): depth or level of node within root tree (use -1 to leave indentation as-is)
:param indent_space (str): string to use for indentation (use '' to remove all indentation)
:returns (etree._Element): clean node, same instance that was received (if applicable)
"""
xml_node = xml_node_or_string
# Convert str/bytes to etree._Element
if isinstance(xml_node, str):
xml_node = xml_node.encode() # misnomer: fromstring actually reads bytes
if isinstance(xml_node, bytes):
xml_node = etree.fromstring(xml_node)
# Process leaf nodes iteratively
# Depth-first, so any inner node may become a leaf too (if children are removed)
def leaf_iter(parent_node, node, level):
for child_node in node:
leaf_iter(node, child_node, level if level < 0 else level + 1)
# Indentation
if level >= 0:
indent = '\n' + indent_space * level
if not node.tail or not node.tail.strip():
node.tail = '\n' if parent_node is None else indent
if len(node) > 0:
if not node.text or not node.text.strip():
# First child's indentation is parent's text
node.text = indent + indent_space
last_child = node[-1]
if last_child.tail == indent + indent_space:
# Last child's tail is parent's closing tag indentation
last_child.tail = indent
# Removal condition: node is leaf (not root nor inner node)
if parent_node is not None and len(node) == 0:
if remove_blank_text and node.text is not None and not node.text.strip():
# node.text is None iff node.tag is self-closing (text='' creates closing tag)
node.text = ''
if remove_blank_nodes and not (node.text or ''):
parent_node.remove(node)
leaf_iter(None, xml_node, indent_level)
return xml_node
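# Usage sketch: whitespace-only leaves are dropped and the remaining tree is
# re-indented, so only the <b> node survives in this small example.
#
#     node = cleanup_xml_node(b'<root><a>  </a><b>x</b></root>')
#     print(etree.tostring(node, encoding='unicode'))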
| 43.282609 | 5,973 |
5,435 |
py
|
PYTHON
|
15.0
|
""" View validation code (using assertions, not the RNG schema). """
import ast
import collections
import logging
import os
import re
from lxml import etree
from odoo import tools
_logger = logging.getLogger(__name__)
_validators = collections.defaultdict(list)
_relaxng_cache = {}
READONLY = re.compile(r"\breadonly\b")
def _get_attrs_symbols():
""" Return a set of predefined symbols for evaluating attrs. """
return {
'True', 'False', 'None', # those are identifiers in Python 2.7
'self',
'id',
'uid',
'context',
'context_today',
'active_id',
'active_ids',
'allowed_company_ids',
'current_company_id',
'active_model',
'time',
'datetime',
'relativedelta',
'current_date',
'today',
'now',
'abs',
'len',
'bool',
'float',
'str',
'unicode',
}
def get_variable_names(expr):
""" Return the subexpressions of the kind "VARNAME(.ATTNAME)*" in the given
string or AST node.
"""
IGNORED = _get_attrs_symbols()
names = set()
def get_name_seq(node):
if isinstance(node, ast.Name):
return [node.id]
elif isinstance(node, ast.Attribute):
left = get_name_seq(node.value)
return left and left + [node.attr]
def process(node):
seq = get_name_seq(node)
if seq and seq[0] not in IGNORED:
names.add('.'.join(seq))
else:
for child in ast.iter_child_nodes(node):
process(child)
if isinstance(expr, str):
expr = ast.parse(expr.strip(), mode='eval').body
process(expr)
return names
def get_dict_asts(expr):
""" Check that the given string or AST node represents a dict expression
where all keys are string literals, and return it as a dict mapping string
keys to the AST of values.
"""
if isinstance(expr, str):
expr = ast.parse(expr.strip(), mode='eval').body
if not isinstance(expr, ast.Dict):
raise ValueError("Non-dict expression")
if not all(isinstance(key, ast.Str) for key in expr.keys):
raise ValueError("Non-string literal dict key")
return {key.s: val for key, val in zip(expr.keys, expr.values)}
def _check(condition, explanation):
if not condition:
raise ValueError("Expression is not a valid domain: %s" % explanation)
def get_domain_identifiers(expr):
""" Check that the given string or AST node represents a domain expression,
and return a pair of sets ``(fields, vars)`` where ``fields`` are the field
names on the left-hand side of conditions, and ``vars`` are the variable
names on the right-hand side of conditions.
"""
if not expr: # case of expr=""
return (set(), set())
if isinstance(expr, str):
expr = ast.parse(expr.strip(), mode='eval').body
fnames = set()
vnames = set()
if isinstance(expr, ast.List):
for elem in expr.elts:
if isinstance(elem, ast.Str):
# note: this doesn't check the and/or structure
_check(elem.s in ('&', '|', '!'),
f"logical operators should be '&', '|', or '!', found {elem.s!r}")
continue
if not isinstance(elem, (ast.List, ast.Tuple)):
continue
_check(len(elem.elts) == 3,
f"segments should have 3 elements, found {len(elem.elts)}")
lhs, operator, rhs = elem.elts
_check(isinstance(operator, ast.Str),
f"operator should be a string, found {type(operator).__name__}")
if isinstance(lhs, ast.Str):
fnames.add(lhs.s)
vnames.update(get_variable_names(expr))
return (fnames, vnames)
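# Usage sketch: split a domain string into the field names used on the
# left-hand side and the variable names used on the right-hand side.
#
#     >>> get_domain_identifiers("[('partner_id.country_id', '=', company_id)]")
#     ({'partner_id.country_id'}, {'company_id'})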
def valid_view(arch, **kwargs):
for pred in _validators[arch.tag]:
check = pred(arch, **kwargs)
if not check:
_logger.error("Invalid XML: %s", pred.__doc__)
return False
if check == "Warning":
_logger.warning("Invalid XML: %s", pred.__doc__)
return "Warning"
return True
def validate(*view_types):
""" Registers a view-validation function for the specific view types
"""
def decorator(fn):
for arch in view_types:
_validators[arch].append(fn)
return fn
return decorator
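# Usage sketch: register an extra check for form views; the rule enforced here
# is only an illustration, not one of the standard validators.
#
#     @validate('form')
#     def form_has_string(arch, **kwargs):
#         """Form views should carry a string attribute."""
#         return bool(arch.get('string')) or "Warning"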
def relaxng(view_type):
""" Return a validator for the given view type, or None. """
if view_type not in _relaxng_cache:
with tools.file_open(os.path.join('base', 'rng', '%s_view.rng' % view_type)) as frng:
try:
relaxng_doc = etree.parse(frng)
_relaxng_cache[view_type] = etree.RelaxNG(relaxng_doc)
except Exception:
_logger.exception('Failed to load RelaxNG XML schema for views validation')
_relaxng_cache[view_type] = None
return _relaxng_cache[view_type]
@validate('calendar', 'graph', 'pivot', 'search', 'tree', 'activity')
def schema_valid(arch, **kwargs):
""" Get RNG validator and validate RNG file."""
validator = relaxng(arch.tag)
if validator and not validator.validate(arch):
result = True
for error in validator.error_log:
_logger.error(tools.ustr(error))
result = False
return result
return True
| 29.862637 | 5,435 |
15,111 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
safe_eval module - methods intended to provide more restricted alternatives to
evaluate simple and/or untrusted code.
Methods in this module are typically used as alternatives to eval() to parse
OpenERP domain strings, conditions and expressions, mostly based on locals
condition/math builtins.
"""
# Module partially ripped from/inspired by several different sources:
# - http://code.activestate.com/recipes/286134/
# - safe_eval in lp:~xrg/openobject-server/optimize-5.0
# - safe_eval in tryton http://hg.tryton.org/hgwebdir.cgi/trytond/rev/bbb5f73319ad
import dis
import functools
import logging
import types
from opcode import HAVE_ARGUMENT, opmap, opname
from types import CodeType
import werkzeug
from psycopg2 import OperationalError
from .misc import ustr
import odoo
unsafe_eval = eval
__all__ = ['test_expr', 'safe_eval', 'const_eval']
# The time module is usually already provided in the safe_eval environment
# but some code, e.g. datetime.datetime.now() (Windows/Python 2.5.2, bug
# lp:703841), does import time.
_ALLOWED_MODULES = ['_strptime', 'math', 'time']
_UNSAFE_ATTRIBUTES = ['f_builtins', 'f_globals', 'f_locals', 'gi_frame', 'gi_code',
'co_code', 'func_globals']
def to_opcodes(opnames, _opmap=opmap):
for x in opnames:
if x in _opmap:
yield _opmap[x]
# opcodes which absolutely positively must not be usable in safe_eval,
# explicitly subtracted from all sets of valid opcodes just in case
_BLACKLIST = set(to_opcodes([
# can't provide access to accessing arbitrary modules
'IMPORT_STAR', 'IMPORT_NAME', 'IMPORT_FROM',
# could allow replacing or updating core attributes on models & al, setitem
# can be used to set field values
'STORE_ATTR', 'DELETE_ATTR',
# no reason to allow this
'STORE_GLOBAL', 'DELETE_GLOBAL',
]))
# opcodes necessary to build literal values
_CONST_OPCODES = set(to_opcodes([
# stack manipulations
'POP_TOP', 'ROT_TWO', 'ROT_THREE', 'ROT_FOUR', 'DUP_TOP', 'DUP_TOP_TWO',
'LOAD_CONST',
'RETURN_VALUE', # return the result of the literal/expr evaluation
# literal collections
'BUILD_LIST', 'BUILD_MAP', 'BUILD_TUPLE', 'BUILD_SET',
# 3.6: literal map with constant keys https://bugs.python.org/issue27140
'BUILD_CONST_KEY_MAP',
'LIST_EXTEND', 'SET_UPDATE',
])) - _BLACKLIST
# operations which are both binary and inplace, same order as in the docs
_operations = [
'POWER', 'MULTIPLY', # 'MATRIX_MULTIPLY', # matrix operator (3.5+)
'FLOOR_DIVIDE', 'TRUE_DIVIDE', 'MODULO', 'ADD',
'SUBTRACT', 'LSHIFT', 'RSHIFT', 'AND', 'XOR', 'OR',
]
# operations on literal values
_EXPR_OPCODES = _CONST_OPCODES.union(to_opcodes([
'UNARY_POSITIVE', 'UNARY_NEGATIVE', 'UNARY_NOT', 'UNARY_INVERT',
*('BINARY_' + op for op in _operations), 'BINARY_SUBSCR',
*('INPLACE_' + op for op in _operations),
'BUILD_SLICE',
# comprehensions
'LIST_APPEND', 'MAP_ADD', 'SET_ADD',
'COMPARE_OP',
# specialised comparisons
'IS_OP', 'CONTAINS_OP',
'DICT_MERGE', 'DICT_UPDATE',
# Basically used in any "generator literal"
'GEN_START', # added in 3.10 but already removed from 3.11.
])) - _BLACKLIST
_SAFE_OPCODES = _EXPR_OPCODES.union(to_opcodes([
'POP_BLOCK', 'POP_EXCEPT',
# note: removed in 3.8
'SETUP_LOOP', 'SETUP_EXCEPT', 'BREAK_LOOP', 'CONTINUE_LOOP',
'EXTENDED_ARG', # P3.6 for long jump offsets.
'MAKE_FUNCTION', 'CALL_FUNCTION', 'CALL_FUNCTION_KW', 'CALL_FUNCTION_EX',
# Added in P3.7 https://bugs.python.org/issue26110
'CALL_METHOD', 'LOAD_METHOD',
'GET_ITER', 'FOR_ITER', 'YIELD_VALUE',
'JUMP_FORWARD', 'JUMP_ABSOLUTE',
'JUMP_IF_FALSE_OR_POP', 'JUMP_IF_TRUE_OR_POP', 'POP_JUMP_IF_FALSE', 'POP_JUMP_IF_TRUE',
'SETUP_FINALLY', 'END_FINALLY',
# Added in 3.8 https://bugs.python.org/issue17611
'BEGIN_FINALLY', 'CALL_FINALLY', 'POP_FINALLY',
'RAISE_VARARGS', 'LOAD_NAME', 'STORE_NAME', 'DELETE_NAME', 'LOAD_ATTR',
'LOAD_FAST', 'STORE_FAST', 'DELETE_FAST', 'UNPACK_SEQUENCE',
'STORE_SUBSCR',
'LOAD_GLOBAL',
'RERAISE', 'JUMP_IF_NOT_EXC_MATCH',
])) - _BLACKLIST
_logger = logging.getLogger(__name__)
def assert_no_dunder_name(code_obj, expr):
""" assert_no_dunder_name(code_obj, expr) -> None
Asserts that the code object does not refer to any "dunder name"
(__$name__), so that safe_eval prevents access to any internal-ish Python
attribute or method (both are loaded via LOAD_ATTR which uses a name, not a
const or a var).
Checks that no such name exists in the provided code object (co_names).
:param code_obj: code object to name-validate
:type code_obj: CodeType
:param str expr: expression corresponding to the code object, for debugging
purposes
:raises NameError: in case a forbidden name (containing two underscores)
is found in ``code_obj``
.. note:: actually forbids every name containing 2 underscores
"""
for name in code_obj.co_names:
if "__" in name or name in _UNSAFE_ATTRIBUTES:
raise NameError('Access to forbidden name %r (%r)' % (name, expr))
def assert_valid_codeobj(allowed_codes, code_obj, expr):
""" Asserts that the provided code object validates against the bytecode
and name constraints.
Recursively validates the code objects stored in its co_consts in case
lambdas are being created/used (lambdas generate their own separated code
objects and don't live in the root one)
:param allowed_codes: list of permissible bytecode instructions
:type allowed_codes: set(int)
:param code_obj: code object to name-validate
:type code_obj: CodeType
:param str expr: expression corresponding to the code object, for debugging
purposes
:raises ValueError: in case of forbidden bytecode in ``code_obj``
:raises NameError: in case a forbidden name (containing two underscores)
is found in ``code_obj``
"""
assert_no_dunder_name(code_obj, expr)
# set operations are almost twice as fast as a manual iteration + condition
# when loading /web according to line_profiler
code_codes = {i.opcode for i in dis.get_instructions(code_obj)}
if not allowed_codes >= code_codes:
raise ValueError("forbidden opcode(s) in %r: %s" % (expr, ', '.join(opname[x] for x in (code_codes - allowed_codes))))
for const in code_obj.co_consts:
if isinstance(const, CodeType):
assert_valid_codeobj(allowed_codes, const, 'lambda')
def test_expr(expr, allowed_codes, mode="eval"):
"""test_expr(expression, allowed_codes[, mode]) -> code_object
Test that the expression contains only the allowed opcodes.
If the expression is valid and contains only allowed codes,
return the compiled code object.
Otherwise raise a ValueError, a Syntax Error or TypeError accordingly.
"""
try:
if mode == 'eval':
# eval() does not like leading/trailing whitespace
expr = expr.strip()
code_obj = compile(expr, "", mode)
except (SyntaxError, TypeError, ValueError):
raise
except Exception as e:
raise ValueError('"%s" while compiling\n%r' % (ustr(e), expr))
assert_valid_codeobj(allowed_codes, code_obj, expr)
return code_obj
def const_eval(expr):
"""const_eval(expression) -> value
Safe Python constant evaluation
Evaluates a string that contains an expression describing
a Python constant. Strings that are not valid Python expressions
or that contain other code besides the constant raise ValueError.
>>> const_eval("10")
10
>>> const_eval("[1,2, (3,4), {'foo':'bar'}]")
[1, 2, (3, 4), {'foo': 'bar'}]
>>> const_eval("1+2")
Traceback (most recent call last):
...
ValueError: opcode BINARY_ADD not allowed
"""
c = test_expr(expr, _CONST_OPCODES)
return unsafe_eval(c)
def expr_eval(expr):
"""expr_eval(expression) -> value
Restricted Python expression evaluation
Evaluates a string that contains an expression that only
uses Python constants. This can be used to e.g. evaluate
a numerical expression from an untrusted source.
>>> expr_eval("1+2")
3
>>> expr_eval("[1,2]*2")
[1, 2, 1, 2]
>>> expr_eval("__import__('sys').modules")
Traceback (most recent call last):
...
ValueError: opcode LOAD_NAME not allowed
"""
c = test_expr(expr, _EXPR_OPCODES)
return unsafe_eval(c)
def _import(name, globals=None, locals=None, fromlist=None, level=-1):
if globals is None:
globals = {}
if locals is None:
locals = {}
if fromlist is None:
fromlist = []
if name in _ALLOWED_MODULES:
return __import__(name, globals, locals, level)
raise ImportError(name)
_BUILTINS = {
'__import__': _import,
'True': True,
'False': False,
'None': None,
'bytes': bytes,
'str': str,
'unicode': str,
'bool': bool,
'int': int,
'float': float,
'enumerate': enumerate,
'dict': dict,
'list': list,
'tuple': tuple,
'map': map,
'abs': abs,
'min': min,
'max': max,
'sum': sum,
'reduce': functools.reduce,
'filter': filter,
'sorted': sorted,
'round': round,
'len': len,
'repr': repr,
'set': set,
'all': all,
'any': any,
'ord': ord,
'chr': chr,
'divmod': divmod,
'isinstance': isinstance,
'range': range,
'xrange': range,
'zip': zip,
'Exception': Exception,
}
def safe_eval(expr, globals_dict=None, locals_dict=None, mode="eval", nocopy=False, locals_builtins=False):
"""safe_eval(expression[, globals[, locals[, mode[, nocopy]]]]) -> result
System-restricted Python expression evaluation
Evaluates a string that contains an expression that mostly
uses Python constants, arithmetic expressions and the
objects directly provided in context.
This can be used to e.g. evaluate
an OpenERP domain expression from an untrusted source.
:throws TypeError: If the expression provided is a code object
:throws SyntaxError: If the expression provided is not valid Python
:throws NameError: If the expression provided accesses forbidden names
:throws ValueError: If the expression provided uses forbidden bytecode
"""
if type(expr) is CodeType:
raise TypeError("safe_eval does not allow direct evaluation of code objects.")
# prevent altering the globals/locals from within the sandbox
# by taking a copy.
if not nocopy:
# isinstance() does not work below, we want *exactly* the dict class
if (globals_dict is not None and type(globals_dict) is not dict) \
or (locals_dict is not None and type(locals_dict) is not dict):
_logger.warning(
"Looks like you are trying to pass a dynamic environment, "
"you should probably pass nocopy=True to safe_eval().")
if globals_dict is not None:
globals_dict = dict(globals_dict)
if locals_dict is not None:
locals_dict = dict(locals_dict)
check_values(globals_dict)
check_values(locals_dict)
if globals_dict is None:
globals_dict = {}
globals_dict['__builtins__'] = _BUILTINS
if locals_builtins:
if locals_dict is None:
locals_dict = {}
locals_dict.update(_BUILTINS)
c = test_expr(expr, _SAFE_OPCODES, mode=mode)
try:
return unsafe_eval(c, globals_dict, locals_dict)
except odoo.exceptions.UserError:
raise
except odoo.exceptions.RedirectWarning:
raise
except werkzeug.exceptions.HTTPException:
raise
except odoo.http.AuthenticationError:
raise
except OperationalError:
# Do not hide PostgreSQL low-level exceptions, to let the auto-replay
# of serialized transactions work its magic
raise
except ZeroDivisionError:
raise
except Exception as e:
raise ValueError('%s: "%s" while evaluating\n%r' % (ustr(type(e)), ustr(e), expr))
def test_python_expr(expr, mode="eval"):
try:
test_expr(expr, _SAFE_OPCODES, mode=mode)
except (SyntaxError, TypeError, ValueError) as err:
if len(err.args) >= 2 and len(err.args[1]) >= 4:
error = {
'message': err.args[0],
'filename': err.args[1][0],
'lineno': err.args[1][1],
'offset': err.args[1][2],
'error_line': err.args[1][3],
}
msg = "%s : %s at line %d\n%s" % (type(err).__name__, error['message'], error['lineno'], error['error_line'])
else:
msg = ustr(err)
return msg
return False
def check_values(d):
if not d:
return d
for v in d.values():
if isinstance(v, types.ModuleType):
raise TypeError(f"""Module {v} can not be used in evaluation contexts
Prefer providing only the items necessary for your intended use.
If a "module" is necessary for backwards compatibility, use
`odoo.tools.safe_eval.wrap_module` to generate a wrapper recursively
whitelisting allowed attributes.
Pre-wrapped modules are provided as attributes of `odoo.tools.safe_eval`.
""")
return d
class wrap_module:
def __init__(self, module, attributes):
"""Helper for wrapping a package/module to expose selected attributes
:param module: the actual package/module to wrap, as returned by ``import <module>``
:param iterable attributes: attributes to expose / whitelist. If a dict,
the keys are the attributes and the values
are used as an ``attributes`` in case the
corresponding item is a submodule
"""
# builtin modules don't have a __file__ at all
modfile = getattr(module, '__file__', '(built-in)')
self._repr = f"<wrapped {module.__name__!r} ({modfile})>"
for attrib in attributes:
target = getattr(module, attrib)
if isinstance(target, types.ModuleType):
target = wrap_module(target, attributes[attrib])
setattr(self, attrib, target)
def __repr__(self):
return self._repr
# dateutil submodules are lazy so need to import them for them to "exist"
import dateutil
mods = ['parser', 'relativedelta', 'rrule', 'tz']
for mod in mods:
__import__('dateutil.%s' % mod)
datetime = wrap_module(__import__('datetime'), ['date', 'datetime', 'time', 'timedelta', 'timezone', 'tzinfo', 'MAXYEAR', 'MINYEAR'])
dateutil = wrap_module(dateutil, {
mod: getattr(dateutil, mod).__all__
for mod in mods
})
json = wrap_module(__import__('json'), ['loads', 'dumps'])
time = wrap_module(__import__('time'), ['time', 'strptime', 'strftime', 'sleep'])
pytz = wrap_module(__import__('pytz'), [
'utc', 'UTC', 'timezone',
])
| 35.893112 | 15,111 |
13,078 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# pylint: disable=sql-injection
import logging
import psycopg2
_schema = logging.getLogger('odoo.schema')
_CONFDELTYPES = {
'RESTRICT': 'r',
'NO ACTION': 'a',
'CASCADE': 'c',
'SET NULL': 'n',
'SET DEFAULT': 'd',
}
def existing_tables(cr, tablenames):
""" Return the names of existing tables among ``tablenames``. """
query = """
SELECT c.relname
FROM pg_class c
JOIN pg_namespace n ON (n.oid = c.relnamespace)
WHERE c.relname IN %s
AND c.relkind IN ('r', 'v', 'm')
AND n.nspname = current_schema
"""
cr.execute(query, [tuple(tablenames)])
return [row[0] for row in cr.fetchall()]
def table_exists(cr, tablename):
""" Return whether the given table exists. """
return len(existing_tables(cr, {tablename})) == 1
def table_kind(cr, tablename):
""" Return the kind of a table: ``'r'`` (regular table), ``'v'`` (view),
``'f'`` (foreign table), ``'t'`` (temporary table),
``'m'`` (materialized view), or ``None``.
"""
query = """
SELECT c.relkind
FROM pg_class c
JOIN pg_namespace n ON (n.oid = c.relnamespace)
WHERE c.relname = %s
AND n.nspname = current_schema
"""
cr.execute(query, (tablename,))
return cr.fetchone()[0] if cr.rowcount else None
def create_model_table(cr, tablename, comment=None, columns=()):
""" Create the table for a model. """
colspecs = ['id SERIAL NOT NULL'] + [
'"{}" {}'.format(columnname, columntype)
for columnname, columntype, columncomment in columns
]
cr.execute('CREATE TABLE "{}" ({}, PRIMARY KEY(id))'.format(tablename, ", ".join(colspecs)))
queries, params = [], []
if comment:
queries.append('COMMENT ON TABLE "{}" IS %s'.format(tablename))
params.append(comment)
for columnname, columntype, columncomment in columns:
queries.append('COMMENT ON COLUMN "{}"."{}" IS %s'.format(tablename, columnname))
params.append(columncomment)
if queries:
cr.execute("; ".join(queries), params)
_schema.debug("Table %r: created", tablename)
def table_columns(cr, tablename):
""" Return a dict mapping column names to their configuration. The latter is
a dict with the data from the table ``information_schema.columns``.
"""
# Do not select the field `character_octet_length` from `information_schema.columns`
    # because specific access right restrictions in the context of shared hosting (Heroku, OVH, ...)
    # might prevent a postgres user from reading this field.
query = '''SELECT column_name, udt_name, character_maximum_length, is_nullable
FROM information_schema.columns WHERE table_name=%s'''
cr.execute(query, (tablename,))
return {row['column_name']: row for row in cr.dictfetchall()}
def column_exists(cr, tablename, columnname):
""" Return whether the given column exists. """
query = """ SELECT 1 FROM information_schema.columns
WHERE table_name=%s AND column_name=%s """
cr.execute(query, (tablename, columnname))
return cr.rowcount
def create_column(cr, tablename, columnname, columntype, comment=None):
""" Create a column with the given type. """
    coldefault = 'DEFAULT false' if columntype.upper() == 'BOOLEAN' else ''
cr.execute('ALTER TABLE "{}" ADD COLUMN "{}" {} {}'.format(tablename, columnname, columntype, coldefault))
if comment:
cr.execute('COMMENT ON COLUMN "{}"."{}" IS %s'.format(tablename, columnname), (comment,))
_schema.debug("Table %r: added column %r of type %s", tablename, columnname, columntype)
def rename_column(cr, tablename, columnname1, columnname2):
""" Rename the given column. """
cr.execute('ALTER TABLE "{}" RENAME COLUMN "{}" TO "{}"'.format(tablename, columnname1, columnname2))
_schema.debug("Table %r: renamed column %r to %r", tablename, columnname1, columnname2)
def convert_column(cr, tablename, columnname, columntype):
""" Convert the column to the given type. """
try:
with cr.savepoint(flush=False):
cr.execute('ALTER TABLE "{}" ALTER COLUMN "{}" TYPE {}'.format(tablename, columnname, columntype),
log_exceptions=False)
except psycopg2.NotSupportedError:
# can't do inplace change -> use a casted temp column
query = '''
ALTER TABLE "{0}" RENAME COLUMN "{1}" TO __temp_type_cast;
ALTER TABLE "{0}" ADD COLUMN "{1}" {2};
UPDATE "{0}" SET "{1}"= __temp_type_cast::{2};
ALTER TABLE "{0}" DROP COLUMN __temp_type_cast CASCADE;
'''
cr.execute(query.format(tablename, columnname, columntype))
_schema.debug("Table %r: column %r changed to type %s", tablename, columnname, columntype)
def set_not_null(cr, tablename, columnname):
""" Add a NOT NULL constraint on the given column. """
query = 'ALTER TABLE "{}" ALTER COLUMN "{}" SET NOT NULL'.format(tablename, columnname)
try:
with cr.savepoint(flush=False):
cr.execute(query, log_exceptions=False)
_schema.debug("Table %r: column %r: added constraint NOT NULL", tablename, columnname)
except Exception:
raise Exception("Table %r: unable to set NOT NULL on column %r", tablename, columnname)
def drop_not_null(cr, tablename, columnname):
""" Drop the NOT NULL constraint on the given column. """
cr.execute('ALTER TABLE "{}" ALTER COLUMN "{}" DROP NOT NULL'.format(tablename, columnname))
_schema.debug("Table %r: column %r: dropped constraint NOT NULL", tablename, columnname)
def constraint_definition(cr, tablename, constraintname):
""" Return the given constraint's definition. """
query = """
SELECT COALESCE(d.description, pg_get_constraintdef(c.oid))
FROM pg_constraint c
JOIN pg_class t ON t.oid = c.conrelid
LEFT JOIN pg_description d ON c.oid = d.objoid
WHERE t.relname = %s AND conname = %s;"""
cr.execute(query, (tablename, constraintname))
return cr.fetchone()[0] if cr.rowcount else None
def add_constraint(cr, tablename, constraintname, definition):
""" Add a constraint on the given table. """
query1 = 'ALTER TABLE "{}" ADD CONSTRAINT "{}" {}'.format(tablename, constraintname, definition)
query2 = 'COMMENT ON CONSTRAINT "{}" ON "{}" IS %s'.format(constraintname, tablename)
try:
with cr.savepoint(flush=False):
cr.execute(query1, log_exceptions=False)
cr.execute(query2, (definition,), log_exceptions=False)
_schema.debug("Table %r: added constraint %r as %s", tablename, constraintname, definition)
except Exception:
raise Exception("Table %r: unable to add constraint %r as %s", tablename, constraintname, definition)
def drop_constraint(cr, tablename, constraintname):
""" drop the given constraint. """
try:
with cr.savepoint(flush=False):
cr.execute('ALTER TABLE "{}" DROP CONSTRAINT "{}"'.format(tablename, constraintname))
_schema.debug("Table %r: dropped constraint %r", tablename, constraintname)
except Exception:
_schema.warning("Table %r: unable to drop constraint %r!", tablename, constraintname)
def add_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondelete):
""" Create the given foreign key, and return ``True``. """
query = 'ALTER TABLE "{}" ADD FOREIGN KEY ("{}") REFERENCES "{}"("{}") ON DELETE {}'
cr.execute(query.format(tablename1, columnname1, tablename2, columnname2, ondelete))
_schema.debug("Table %r: added foreign key %r references %r(%r) ON DELETE %s",
tablename1, columnname1, tablename2, columnname2, ondelete)
return True
def get_foreign_keys(cr, tablename1, columnname1, tablename2, columnname2, ondelete):
cr.execute(
"""
SELECT fk.conname as name
FROM pg_constraint AS fk
JOIN pg_class AS c1 ON fk.conrelid = c1.oid
JOIN pg_class AS c2 ON fk.confrelid = c2.oid
JOIN pg_attribute AS a1 ON a1.attrelid = c1.oid AND fk.conkey[1] = a1.attnum
JOIN pg_attribute AS a2 ON a2.attrelid = c2.oid AND fk.confkey[1] = a2.attnum
WHERE fk.contype = 'f'
AND c1.relname = %s
AND a1.attname = %s
AND c2.relname = %s
AND a2.attname = %s
AND fk.confdeltype = %s
""", [tablename1, columnname1, tablename2, columnname2, _CONFDELTYPES[ondelete.upper()]]
)
return [r[0] for r in cr.fetchall()]
def fix_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondelete):
""" Update the foreign keys between tables to match the given one, and
return ``True`` if the given foreign key has been recreated.
"""
# Do not use 'information_schema' here, as those views are awfully slow!
deltype = _CONFDELTYPES.get(ondelete.upper(), 'a')
query = """ SELECT con.conname, c2.relname, a2.attname, con.confdeltype as deltype
FROM pg_constraint as con, pg_class as c1, pg_class as c2,
pg_attribute as a1, pg_attribute as a2
WHERE con.contype='f' AND con.conrelid=c1.oid AND con.confrelid=c2.oid
AND array_lower(con.conkey, 1)=1 AND con.conkey[1]=a1.attnum
AND array_lower(con.confkey, 1)=1 AND con.confkey[1]=a2.attnum
AND a1.attrelid=c1.oid AND a2.attrelid=c2.oid
AND c1.relname=%s AND a1.attname=%s """
cr.execute(query, (tablename1, columnname1))
found = False
for fk in cr.fetchall():
if not found and fk[1:] == (tablename2, columnname2, deltype):
found = True
else:
drop_constraint(cr, tablename1, fk[0])
if not found:
return add_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondelete)
def index_exists(cr, indexname):
""" Return whether the given index exists. """
cr.execute("SELECT 1 FROM pg_indexes WHERE indexname=%s", (indexname,))
return cr.rowcount
def create_index(cr, indexname, tablename, expressions):
""" Create the given index unless it exists. """
if index_exists(cr, indexname):
return
args = ', '.join(expressions)
cr.execute('CREATE INDEX "{}" ON "{}" ({})'.format(indexname, tablename, args))
_schema.debug("Table %r: created index %r (%s)", tablename, indexname, args)
def create_unique_index(cr, indexname, tablename, expressions):
""" Create the given index unless it exists. """
if index_exists(cr, indexname):
return
args = ', '.join(expressions)
cr.execute('CREATE UNIQUE INDEX "{}" ON "{}" ({})'.format(indexname, tablename, args))
_schema.debug("Table %r: created index %r (%s)", tablename, indexname, args)
def drop_index(cr, indexname, tablename):
""" Drop the given index if it exists. """
cr.execute('DROP INDEX IF EXISTS "{}"'.format(indexname))
_schema.debug("Table %r: dropped index %r", tablename, indexname)
def drop_view_if_exists(cr, viewname):
kind = table_kind(cr, viewname)
if kind == 'v':
cr.execute("DROP VIEW {} CASCADE".format(viewname))
elif kind == 'm':
cr.execute("DROP MATERIALIZED VIEW {} CASCADE".format(viewname))
def escape_psql(to_escape):
    return to_escape.replace('\\', r'\\').replace('%', r'\%').replace('_', r'\_')
def pg_varchar(size=0):
""" Returns the VARCHAR declaration for the provided size:
* If no size (or an empty or negative size is provided) return an
'infinite' VARCHAR
* Otherwise return a VARCHAR(n)
    :param int size: varchar size, optional
:rtype: str
"""
if size:
if not isinstance(size, int):
raise ValueError("VARCHAR parameter should be an int, got %s" % type(size))
if size > 0:
return 'VARCHAR(%d)' % size
return 'VARCHAR'
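# Examples:
#
#     >>> pg_varchar()
#     'VARCHAR'
#     >>> pg_varchar(16)
#     'VARCHAR(16)'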
def reverse_order(order):
""" Reverse an ORDER BY clause """
items = []
for item in order.split(','):
item = item.lower().split()
direction = 'asc' if item[1:] == ['desc'] else 'desc'
items.append('%s %s' % (item[0], direction))
return ', '.join(items)
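# Examples (a missing direction is treated as 'asc' and therefore reversed):
#
#     >>> reverse_order("name asc, id desc")
#     'name desc, id asc'
#     >>> reverse_order("create_date")
#     'create_date desc'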
def increment_field_skiplock(record, field):
"""
    Increment the given integer ``field`` on the current record(s) in a
    lock-friendly way: rows that are currently locked are simply skipped.
    The cache is not invalidated since the update is not critical.
    :rtype: bool - whether at least one row was incremented
"""
if not record:
return False
assert record._fields[field].type == 'integer'
cr = record._cr
query = """
UPDATE {table} SET {field} = {field} + 1 WHERE id IN (
SELECT id from {table} WHERE id in %(ids)s FOR UPDATE SKIP LOCKED
) RETURNING id
""".format(table=record._table, field=field)
cr.execute(query, {'ids': tuple(record.ids)})
return bool(cr.fetchone())
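# Hedged usage sketch (model and field names are illustrative):
#
#     increment_field_skiplock(env['x.counter'].browse(ids), 'hit_count')
#     # -> True if at least one unlocked row was incremented, False otherwise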
| 43.304636 | 13,078 |
6,723 |
py
|
PYTHON
|
15.0
|
#!/usr/bin/env python
""" Which - locate a command
* adapted from Brian Curtin's http://bugs.python.org/file15381/shutil_which.patch
* see http://bugs.python.org/issue444582
* uses ``PATHEXT`` on Windows
* searches current directory before ``PATH`` on Windows,
but not before an explicitly passed path
* accepts both string or iterable for an explicitly passed path, or pathext
* accepts an explicitly passed empty path, or pathext (either '' or [])
* does not search ``PATH`` for files that have a path specified in their name already
* moved defpath and defpathext lists initialization to module level,
instead of initializing them on each function call
* changed interface: which_files() returns generator, which() returns first match,
or raises IOError(errno.ENOENT)
.. function:: which_files(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return a generator which yields full paths in which the *file* name exists
in a directory that is part of the file name, or on *path*,
and has the given *mode*.
By default, *mode* matches an inclusive OR of os.F_OK and os.X_OK - an
existing executable file.
The *path* is, by default, the ``PATH`` variable on the platform,
or the string/iterable passed in as *path*.
In the event that a ``PATH`` variable is not found, :const:`os.defpath` is used.
On Windows, a current directory is searched before using the ``PATH`` variable,
but not before an explicitly passed *path*.
The *pathext* is only used on Windows to match files with given extensions appended as well.
It defaults to the ``PATHEXT`` variable, or the string/iterable passed in as *pathext*.
In the event that a ``PATHEXT`` variable is not found,
default value for Windows XP/Vista is used.
The command is always searched without extension first,
even when *pathext* is explicitly passed.
.. function:: which(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return first match generated by which_files(file, mode, path, pathext),
or raise IOError(errno.ENOENT).
"""
__docformat__ = 'restructuredtext en'
__all__ = 'which which_files pathsep defpath defpathext F_OK R_OK W_OK X_OK'.split()
import sys
from os import access, defpath, pathsep, environ, F_OK, R_OK, W_OK, X_OK
from os.path import exists, dirname, split, join
ENOENT = 2
windows = sys.platform.startswith('win')
defpath = environ.get('PATH', defpath).split(pathsep)
if windows:
defpath.insert(0, '.') # can insert without checking, when duplicates are removed
# given the quite usual mess in PATH on Windows, let's rather remove duplicates
seen = set()
defpath = [dir for dir in defpath if dir.lower() not in seen and not seen.add(dir.lower())]
del seen
defpathext = [''] + environ.get('PATHEXT',
'.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC').lower().split(pathsep)
else:
defpathext = ['']
def which_files(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function yields full paths (not necessarily absolute paths),
in which the given file name matches an existing file in a directory on the path.
>>> def test_which(expected, *args, **argd):
... result = list(which_files(*args, **argd))
... assert result == expected, 'which_files: %s != %s' % (result, expected)
...
... try:
... result = [ which(*args, **argd) ]
... except IOError:
... result = []
... assert result[:1] == expected[:1], 'which: %s != %s' % (result[:1], expected[:1])
>>> if windows: cmd = environ['COMSPEC']
>>> if windows: test_which([cmd], 'cmd')
>>> if windows: test_which([cmd], 'cmd.exe')
>>> if windows: test_which([cmd], 'cmd', path=dirname(cmd))
>>> if windows: test_which([cmd], 'cmd', pathext='.exe')
>>> if windows: test_which([cmd], cmd)
>>> if windows: test_which([cmd], cmd, path='<nonexistent>')
>>> if windows: test_which([cmd], cmd, pathext='<nonexistent>')
>>> if windows: test_which([cmd], cmd[:-4])
>>> if windows: test_which([cmd], cmd[:-4], path='<nonexistent>')
>>> if windows: test_which([], 'cmd', path='<nonexistent>')
>>> if windows: test_which([], 'cmd', pathext='<nonexistent>')
>>> if windows: test_which([], '<nonexistent>/cmd')
>>> if windows: test_which([], cmd[:-4], pathext='<nonexistent>')
>>> if not windows: sh = '/bin/sh'
>>> if not windows: test_which([sh], 'sh')
>>> if not windows: test_which([sh], 'sh', path=dirname(sh))
>>> if not windows: test_which([sh], 'sh', pathext='<nonexistent>')
>>> if not windows: test_which([sh], sh)
>>> if not windows: test_which([sh], sh, path='<nonexistent>')
>>> if not windows: test_which([sh], sh, pathext='<nonexistent>')
>>> if not windows: test_which([], 'sh', mode=W_OK) # not running as root, are you?
>>> if not windows: test_which([], 'sh', path='<nonexistent>')
>>> if not windows: test_which([], '<nonexistent>/sh')
"""
filepath, file = split(file)
if filepath:
path = (filepath,)
elif path is None:
path = defpath
elif isinstance(path, str):
path = path.split(pathsep)
if pathext is None:
pathext = defpathext
elif isinstance(pathext, str):
pathext = pathext.split(pathsep)
    if '' not in pathext:
pathext.insert(0, '') # always check command without extension, even for custom pathext
for dir in path:
basepath = join(dir, file)
for ext in pathext:
fullpath = basepath + ext
if exists(fullpath) and access(fullpath, mode):
yield fullpath
def which(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function returns full path (not necessarily absolute path),
in which the given file name matches an existing file in a directory on the path,
or raises IOError(errno.ENOENT).
>>> # for doctest see which_files()
"""
path = next(which_files(file, mode, path, pathext), None)
if path is None:
raise IOError(ENOENT, '%s not found' % (mode & X_OK and 'command' or 'file'), file)
return path
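# Hedged usage sketch (actual paths depend on the host system):
#
#     >>> which('sh')                        # doctest: +SKIP
#     '/bin/sh'
#     >>> next(which_files('python3'), None) # doctest: +SKIP
#     '/usr/bin/python3'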
if __name__ == '__main__':
import doctest
doctest.testmod()
| 45.120805 | 6,723 |
8,075 |
py
|
PYTHON
|
15.0
|
import random
from datetime import datetime
from dateutil.relativedelta import relativedelta
from odoo.tools import pycompat
def Random(seed):
""" Return a random number generator object with the given seed. """
r = random.Random()
r.seed(seed, version=2)
return r
def format_str(val, counter, values):
""" Format the given value (with method ``format``) when it is a string. """
if isinstance(val, str):
return val.format(counter=counter, values=values)
return val
def chain_factories(field_factories, model_name):
""" Instantiate a generator by calling all the field factories. """
generator = root_factory()
for (fname, field_factory) in field_factories:
generator = field_factory(generator, fname, model_name)
return generator
def root_factory():
""" Return a generator with empty values dictionaries (except for the flag ``__complete``). """
yield {'__complete': False}
while True:
yield {'__complete': True}
def randomize(vals, weights=None, seed=False, formatter=format_str, counter_offset=0):
""" Return a factory for an iterator of values dicts with pseudo-randomly
chosen values (among ``vals``) for a field.
:param list vals: list in which a value will be chosen, depending on `weights`
:param list weights: list of probabilistic weights
:param seed: optional initialization of the random number generator
:param function formatter: (val, counter, values) --> formatted_value
:param int counter_offset:
:returns: function of the form (iterator, field_name, model_name) -> values
:rtype: function (iterator, str, str) -> dict
"""
def generate(iterator, field_name, model_name):
r = Random('%s+field+%s' % (model_name, seed or field_name))
for counter, values in enumerate(iterator):
val = r.choices(vals, weights)[0]
values[field_name] = formatter(val, counter + counter_offset, values)
yield values
return generate
def cartesian(vals, weights=None, seed=False, formatter=format_str, then=None):
""" Return a factory for an iterator of values dicts that combines all ``vals`` for
the field with the other field values in input.
:param list vals: list in which a value will be chosen, depending on `weights`
:param list weights: list of probabilistic weights
:param seed: optional initialization of the random number generator
:param function formatter: (val, counter, values) --> formatted_value
:param function then: if defined, factory used when vals has been consumed.
:returns: function of the form (iterator, field_name, model_name) -> values
:rtype: function (iterator, str, str) -> dict
"""
def generate(iterator, field_name, model_name):
counter = 0
for values in iterator:
if values['__complete']:
                break  # consumes and loses one element (a '__complete' filler); use a peekable iterator if that ever matters
for val in vals:
yield {**values, field_name: formatter(val, counter, values)}
counter += 1
factory = then or randomize(vals, weights, seed, formatter, counter)
yield from factory(iterator, field_name, model_name)
return generate
def iterate(vals, weights=None, seed=False, formatter=format_str, then=None):
""" Return a factory for an iterator of values dicts that picks a value among ``vals``
for each input. Once all ``vals`` have been used once, resume as ``then`` or as a
``randomize`` generator.
:param list vals: list in which a value will be chosen, depending on `weights`
:param list weights: list of probabilistic weights
:param seed: optional initialization of the random number generator
:param function formatter: (val, counter, values) --> formatted_value
:param function then: if defined, factory used when vals has been consumed.
:returns: function of the form (iterator, field_name, model_name) -> values
:rtype: function (iterator, str, str) -> dict
"""
def generate(iterator, field_name, model_name):
counter = 0
        for val in vals:  # iterable order matters, shortest first
values = next(iterator)
values[field_name] = formatter(val, counter, values)
values['__complete'] = False
yield values
counter += 1
factory = then or randomize(vals, weights, seed, formatter, counter)
yield from factory(iterator, field_name, model_name)
return generate
def constant(val, formatter=format_str):
""" Return a factory for an iterator of values dicts that sets the field
to the given value in each input dict.
:returns: function of the form (iterator, field_name, model_name) -> values
:rtype: function (iterator, str, str) -> dict
"""
def generate(iterator, field_name, _):
for counter, values in enumerate(iterator):
values[field_name] = formatter(val, counter, values)
yield values
return generate
def compute(function, seed=None):
""" Return a factory for an iterator of values dicts that computes the field value
as ``function(values, counter, random)``, where ``values`` is the other field values,
``counter`` is an integer, and ``random`` is a pseudo-random number generator.
:param function function: (values, counter, random) --> field_values
:param seed: optional initialization of the random number generator
:returns: function of the form (iterator, field_name, model_name) -> values
:rtype: function (iterator, str, str) -> dict
"""
def generate(iterator, field_name, model_name):
r = Random('%s+field+%s' % (model_name, seed or field_name))
for counter, values in enumerate(iterator):
val = function(values=values, counter=counter, random=r)
values[field_name] = val
yield values
return generate
def randint(a, b, seed=None):
""" Return a factory for an iterator of values dicts that sets the field
to a random integer between a and b included in each input dict.
:param int a: minimal random value
:param int b: maximal random value
:returns: function of the form (iterator, field_name, model_name) -> values
:rtype: function (iterator, str, str) -> dict
"""
def get_rand_int(random=None, **kwargs):
return random.randint(a, b)
return compute(get_rand_int, seed=seed)
def randfloat(a, b, seed=None):
""" Return a factory for an iterator of values dicts that sets the field
to a random float between a and b included in each input dict.
"""
def get_rand_float(random=None, **kwargs):
return random.uniform(a, b)
return compute(get_rand_float, seed=seed)
def randdatetime(*, base_date=None, relative_before=None, relative_after=None, seed=None):
""" Return a factory for an iterator of values dicts that sets the field
to a random datetime between relative_before and relative_after, relatively to
base_date
:param base_date (datetime): override the default base date if needed.
:param relative_after (relativedelta, timedelta): range up which we can go after the
base date. If not set, defaults to 0, i.e. only in the past of reference.
:param relative_before (relativedelta, timedelta): range up which we can go before the
base date. If not set, defaults to 0, i.e. only in the future of reference.
:return (generator): iterator for random dates inside the defined range
"""
base_date = base_date or datetime(2020, 1, 1)
seconds_before = relative_before and ((base_date + relative_before) - base_date).total_seconds() or 0
seconds_after = relative_after and ((base_date + relative_after) - base_date).total_seconds() or 0
def get_rand_datetime(random=None, **kwargs):
return base_date + relativedelta(seconds=random.randint(int(seconds_before), int(seconds_after)))
return compute(get_rand_datetime, seed=seed)
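# A minimal sketch of how these factories compose (field and model names are
# illustrative only; the resulting dicts are the values passed to create()):
#
#     field_factories = [
#         ('name', iterate(['Alpha', 'Beta'])),
#         ('sequence', randint(1, 10, seed='demo')),
#         ('active', constant(True)),
#     ]
#     generator = chain_factories(field_factories, 'x.demo.model')
#     records = [next(generator) for _ in range(4)]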
| 45.111732 | 8,075 |
24,711 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime
import gc
import json
import logging
import sys
import time
import threading
import re
import functools
from psycopg2 import sql
from odoo import tools
_logger = logging.getLogger(__name__)
# ensure we have a non patched time for profiling times when using freezegun
real_datetime_now = datetime.now
real_time = time.time.__call__
def _format_frame(frame):
code = frame.f_code
return (code.co_filename, frame.f_lineno, code.co_name, '')
def _format_stack(stack):
return [list(frame) for frame in stack]
def get_current_frame(thread=None):
if thread:
frame = sys._current_frames()[thread.ident]
else:
frame = sys._getframe()
while frame.f_code.co_filename == __file__:
frame = frame.f_back
return frame
def _get_stack_trace(frame, limit_frame=None):
stack = []
while frame is not None and frame != limit_frame:
stack.append(_format_frame(frame))
frame = frame.f_back
if frame is None and limit_frame:
_logger.error("Limit frame was not found")
return list(reversed(stack))
def stack_size():
frame = get_current_frame()
size = 0
while frame:
size += 1
frame = frame.f_back
return size
def make_session(name=''):
return f'{real_datetime_now():%Y-%m-%d %H:%M:%S} {name}'
def force_hook():
"""
Force periodic profiling collectors to generate some stack trace. This is
useful before long calls that do not release the GIL, so that the time
spent in those calls is attributed to a specific stack trace, instead of
some arbitrary former frame.
"""
thread = threading.current_thread()
for func in getattr(thread, 'profile_hooks', ()):
func()
class Collector:
"""
Base class for objects that collect profiling data.
A collector object is used by a profiler to collect profiling data, most
likely a list of stack traces with time and some context information added
by ExecutionContext decorator on current thread.
This is a generic implementation of a basic collector, to be inherited.
It defines default behaviors for creating an entry in the collector.
"""
name = None # symbolic name of the collector
_registry = {} # map collector names to their class
@classmethod
def __init_subclass__(cls):
if cls.name:
cls._registry[cls.name] = cls
cls._registry[cls.__name__] = cls
@classmethod
def make(cls, name, *args, **kwargs):
""" Instantiate a collector corresponding to the given name. """
return cls._registry[name](*args, **kwargs)
def __init__(self):
self._processed = False
self._entries = []
self.profiler = None
def start(self):
""" Start the collector. """
def stop(self):
""" Stop the collector. """
def add(self, entry=None, frame=None):
""" Add an entry (dict) to this collector. """
# todo add entry count limit
self._entries.append({
'stack': self._get_stack_trace(frame),
'exec_context': getattr(self.profiler.init_thread, 'exec_context', ()),
'start': real_time(),
**(entry or {}),
})
def _get_stack_trace(self, frame=None):
""" Return the stack trace to be included in a given entry. """
frame = frame or get_current_frame(self.profiler.init_thread)
return _get_stack_trace(frame, self.profiler.init_frame)
def post_process(self):
for entry in self._entries:
stack = entry.get('stack', [])
self.profiler._add_file_lines(stack)
@property
def entries(self):
""" Return the entries of the collector after postprocessing. """
if not self._processed:
self.post_process()
self._processed = True
return self._entries
class SQLCollector(Collector):
"""
Saves all executed queries in the current thread with the call stack.
"""
name = 'sql'
def start(self):
init_thread = self.profiler.init_thread
if not hasattr(init_thread, 'query_hooks'):
init_thread.query_hooks = []
init_thread.query_hooks.append(self.hook)
def stop(self):
self.profiler.init_thread.query_hooks.remove(self.hook)
def hook(self, cr, query, params, query_start, query_time):
self.add({
'query': str(query),
'full_query': str(cr._format(query, params)),
'start': query_start,
'time': query_time,
})
class PeriodicCollector(Collector):
"""
Record execution frames asynchronously at most every `interval` seconds.
:param interval (float): time to wait in seconds between two samples.
"""
name = 'traces_async'
def __init__(self, interval=0.01): # check duration. dynamic?
super().__init__()
self.active = False
self.frame_interval = interval
self.thread = threading.Thread(target=self.run)
self.last_frame = None
def run(self):
self.active = True
last_time = real_time()
while self.active: # maybe add a check on parent_thread state?
duration = real_time() - last_time
if duration > self.frame_interval * 10 and self.last_frame:
# The profiler has unexpectedly slept for more than 10 frame intervals. This may
# happen when calling a C library without releasing the GIL. In that case, the
# last frame was taken before the call, and the next frame is after the call, and
# the call itself does not appear in any of those frames: the duration of the call
# is incorrectly attributed to the last frame.
                self._entries[-1]['stack'].append(('profiling', 0, '⚠ Profiler frozen for %s s' % duration, ''))
self.last_frame = None # skip duplicate detection for the next frame.
self.add()
last_time = real_time()
time.sleep(self.frame_interval)
self._entries.append({'stack': [], 'start': real_time()}) # add final end frame
def start(self):
interval = self.profiler.params.get('traces_async_interval')
if interval:
self.frame_interval = min(max(float(interval), 0.001), 1)
init_thread = self.profiler.init_thread
if not hasattr(init_thread, 'profile_hooks'):
init_thread.profile_hooks = []
init_thread.profile_hooks.append(self.add)
self.thread.start()
def stop(self):
self.active = False
self.thread.join()
self.profiler.init_thread.profile_hooks.remove(self.add)
def add(self, entry=None, frame=None):
""" Add an entry (dict) to this collector. """
frame = frame or get_current_frame(self.profiler.init_thread)
if frame == self.last_frame:
# don't save if the frame is exactly the same as the previous one.
# maybe modify the last entry to add a last seen?
return
self.last_frame = frame
super().add(entry=entry, frame=frame)
class SyncCollector(Collector):
"""
Record complete execution synchronously.
Note that --limit-memory-hard may need to be increased when launching Odoo.
"""
name = 'traces_sync'
def start(self):
assert not self._processed, "You cannot start SyncCollector after accessing entries."
sys.settrace(self.hook) # todo test setprofile, but maybe not multithread safe
def stop(self):
sys.settrace(None)
def hook(self, _frame, event, _arg=None):
if event == 'line':
return
entry = {'event': event, 'frame': _format_frame(_frame)}
if event == 'call' and _frame.f_back:
# we need the parent frame to determine the line number of the call
entry['parent_frame'] = _format_frame(_frame.f_back)
self.add(entry, frame=_frame)
return self.hook
def _get_stack_trace(self, frame=None):
# Getting the full stack trace is slow, and not useful in this case.
# SyncCollector only saves the top frame and event at each call and
# recomputes the complete stack at the end.
return None
def post_process(self):
# Transform the evented traces to full stack traces. This processing
# could be avoided since speedscope will transform that back to
# evented anyway, but it is actually simpler to integrate into the
# current speedscope logic, especially when mixed with SQLCollector.
# We could improve it by saving as evented and manage it later.
stack = []
for entry in self._entries:
frame = entry.pop('frame')
event = entry.pop('event')
if event == 'call':
if stack:
stack[-1] = entry.pop('parent_frame')
stack.append(frame)
elif event == 'return':
stack.pop()
entry['stack'] = stack[:]
super().post_process()
class QwebTracker():
@classmethod
def wrap_render(cls, method_render):
@functools.wraps(method_render)
def _tracked_method_render(self, template, values=None, **options):
current_thread = threading.current_thread()
execution_context_enabled = getattr(current_thread, 'profiler_params', {}).get('execution_context_qweb')
qweb_hooks = getattr(current_thread, 'qweb_hooks', ())
if execution_context_enabled or qweb_hooks:
# To have the new compilation cached because the generated code will change.
# Therefore 'profile' is a key to the cache.
options['profile'] = True
return method_render(self, template, values, **options)
return _tracked_method_render
@classmethod
def wrap_compile(cls, method_compile):
@functools.wraps(method_compile)
def _tracked_compile(self, template, options):
if not options.get('profile'):
return method_compile(self, template, options)
render_template = method_compile(self, template, options)
def profiled_method_compile(self, values):
ref = options.get('ref')
ref_xml = options.get('ref_xml')
qweb_tracker = QwebTracker(ref, ref_xml, self.env.cr)
self = self.with_context(qweb_tracker=qweb_tracker)
if qweb_tracker.execution_context_enabled:
with ExecutionContext(template=ref):
return render_template(self, values)
return render_template(self, values)
return profiled_method_compile
return _tracked_compile
@classmethod
def wrap_compile_directive(cls, method_compile_directive):
@functools.wraps(method_compile_directive)
def _tracked_compile_directive(self, el, options, directive, indent):
if not options.get('profile') or directive in ('content', 'tag'):
return method_compile_directive(self, el, options, directive, indent)
enter = self._indent(f"self.env.context['qweb_tracker'].enter_directive({directive!r}, {el.attrib!r}, {options['last_path_node']!r})", indent)
leave = self._indent("self.env.context['qweb_tracker'].leave_directive()", indent)
code_directive = method_compile_directive(self, el, options, directive, indent)
return [enter, *code_directive, leave] if code_directive else []
return _tracked_compile_directive
def __init__(self, view_id, arch, cr):
current_thread = threading.current_thread() # don't store current_thread on self
self.execution_context_enabled = getattr(current_thread, 'profiler_params', {}).get('execution_context_qweb')
self.qweb_hooks = getattr(current_thread, 'qweb_hooks', ())
self.context_stack = []
self.cr = cr
self.view_id = view_id
for hook in self.qweb_hooks:
hook('render', self.cr.sql_log_count, view_id=view_id, arch=arch)
def enter_directive(self, directive, attrib, xpath):
execution_context = None
if self.execution_context_enabled:
execution_context = tools.profiler.ExecutionContext(directive=directive, xpath=xpath)
execution_context.__enter__()
self.context_stack.append(execution_context)
for hook in self.qweb_hooks:
hook('enter', self.cr.sql_log_count, view_id=self.view_id, xpath=xpath, directive=directive, attrib=attrib)
def leave_directive(self):
if self.execution_context_enabled:
self.context_stack.pop().__exit__()
for hook in self.qweb_hooks:
hook('leave', self.cr.sql_log_count)
class QwebCollector(Collector):
"""
Record qweb execution with directive trace.
"""
name = 'qweb'
def __init__(self):
super().__init__()
self.events = []
def hook(event, sql_log_count, **kwargs):
self.events.append((event, kwargs, sql_log_count, real_time()))
self.hook = hook
def _get_directive_profiling_name(self, directive, attrib):
expr = ''
if directive == 'set':
expr = f"t-set={repr(attrib['t-set'])}"
if 't-value' in attrib:
expr = f"{expr} t-value={repr(attrib['t-value'])}"
if 't-valuef' in attrib:
expr = f"{expr} t-valuef={repr(attrib['t-valuef'])}"
elif directive == 'foreach':
expr = f"t-foreach={repr(attrib['t-foreach'])} t-as={repr(attrib['t-as'])}"
elif directive == 'options':
if attrib.get('t-options'):
expr = f"t-options={repr(attrib['t-options'])}"
for key in list(attrib):
if key.startswith('t-options-'):
expr = f"{expr} {key}={repr(attrib[key])}"
elif directive and ('t-' + directive) in attrib:
expr = f"t-{directive}={repr(attrib['t-' + directive])}"
return expr
def start(self):
init_thread = self.profiler.init_thread
if not hasattr(init_thread, 'qweb_hooks'):
init_thread.qweb_hooks = []
init_thread.qweb_hooks.append(self.hook)
def stop(self):
self.profiler.init_thread.qweb_hooks.remove(self.hook)
def post_process(self):
last_event_query = None
last_event_time = None
stack = []
results = []
archs = {}
for event, kwargs, sql_count, time in self.events:
if event == 'render':
archs[kwargs['view_id']] = kwargs['arch']
continue
# update the active directive with the elapsed time and queries
if stack:
top = stack[-1]
top['delay'] += time - last_event_time
top['query'] += sql_count - last_event_query
last_event_time = time
last_event_query = sql_count
if event == 'enter':
data = {
'view_id': kwargs['view_id'],
'xpath': kwargs['xpath'],
'directive': self._get_directive_profiling_name(kwargs['directive'], kwargs['attrib']),
'delay': 0,
'query': 0,
}
results.append(data)
stack.append(data)
else:
assert event == "leave"
data = stack.pop()
self.add({'results': {'archs': archs, 'data': results}})
super().post_process()
class ExecutionContext:
"""
Add some context on thread at current call stack level.
This context stored by collector beside stack and is used by Speedscope
to add a level to the stack with this information.
"""
def __init__(self, **context):
self.context = context
self.previous_context = None
def __enter__(self):
current_thread = threading.current_thread()
self.previous_context = getattr(current_thread, 'exec_context', ())
current_thread.exec_context = self.previous_context + ((stack_size(), self.context),)
def __exit__(self, *_args):
threading.current_thread().exec_context = self.previous_context
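# Sketch: tag the frames collected while a block runs; keys are free-form and
# end up in the profiling entries (the names below are illustrative):
#
#     with ExecutionContext(template='x_demo_view'):
#         render_the_view()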
class Profiler:
"""
Context manager to use to start the recording of some execution.
Will save sql and async stack trace by default.
"""
def __init__(self, collectors=None, db=..., profile_session=None,
description=None, disable_gc=False, params=None):
"""
:param db: database name to use to save results.
Will try to define database automatically by default.
Use value ``None`` to not save results in a database.
        :param collectors: list of strings and/or Collector objects, e.g. ['sql', PeriodicCollector(interval=0.2)]. Use `None` for the default collectors.
        :param profile_session: session description used to regroup multiple profiles. Use make_session(name) for the default format.
        :param description: description of the current profiler. Suggestion: (route name/test method/loading module, ...)
        :param disable_gc: flag to disable the garbage collector during profiling (useful to avoid gc pauses, especially during sql execution)
:param params: parameters usable by collectors (like frame interval)
"""
self.start_time = 0
self.duration = 0
self.profile_session = profile_session or make_session()
self.description = description
self.init_frame = None
self.init_stack_trace = None
self.init_thread = None
self.disable_gc = disable_gc
self.filecache = {}
self.params = params or {} # custom parameters usable by collectors
self.profile_id = None
if db is ...:
# determine database from current thread
db = getattr(threading.current_thread(), 'dbname', None)
if not db:
                # only raise if path is not given and db is not explicitly disabled
                raise Exception('Database name cannot be defined automatically. \n Please provide a valid/falsy dbname or path parameter')
self.db = db
# collectors
if collectors is None:
collectors = ['sql', 'traces_async']
self.collectors = []
for collector in collectors:
if isinstance(collector, str):
try:
collector = Collector.make(collector)
except Exception:
_logger.error("Could not create collector with name %r", collector)
continue
collector.profiler = self
self.collectors.append(collector)
def __enter__(self):
self.init_thread = threading.current_thread()
self.init_frame = get_current_frame(self.init_thread)
self.init_stack_trace = _get_stack_trace(self.init_frame)
if self.description is None:
frame = self.init_frame
code = frame.f_code
self.description = f"{frame.f_code.co_name} ({code.co_filename}:{frame.f_lineno})"
if self.params:
self.init_thread.profiler_params = self.params
if self.disable_gc and gc.isenabled():
gc.disable()
self.start_time = real_time()
for collector in self.collectors:
collector.start()
return self
def __exit__(self, *args):
try:
for collector in self.collectors:
collector.stop()
self.duration = real_time() - self.start_time
self._add_file_lines(self.init_stack_trace)
if self.db:
# pylint: disable=import-outside-toplevel
from odoo.sql_db import db_connect # only import from odoo if/when needed.
with db_connect(self.db).cursor() as cr:
values = {
"name": self.description,
"session": self.profile_session,
"create_date": real_datetime_now(),
"init_stack_trace": json.dumps(_format_stack(self.init_stack_trace)),
"duration": self.duration,
"entry_count": self.entry_count(),
}
for collector in self.collectors:
if collector.entries:
values[collector.name] = json.dumps(collector.entries)
query = sql.SQL("INSERT INTO {}({}) VALUES %s RETURNING id").format(
sql.Identifier("ir_profile"),
sql.SQL(",").join(map(sql.Identifier, values)),
)
cr.execute(query, [tuple(values.values())])
self.profile_id = cr.fetchone()[0]
_logger.info('ir_profile %s (%s) created', self.profile_id, self.profile_session)
finally:
if self.disable_gc:
gc.enable()
if self.params:
del self.init_thread.profiler_params
def _add_file_lines(self, stack):
for index, frame in enumerate(stack):
(filename, lineno, name, line) = frame
if line != '':
continue
# retrieve file lines from the filecache
if not lineno:
continue
try:
filelines = self.filecache[filename]
except KeyError:
try:
with tools.file_open(filename, filter_ext=('.py',)) as f:
filelines = f.readlines()
except (ValueError, FileNotFoundError): # mainly for <decorator> "filename"
filelines = None
self.filecache[filename] = filelines
# fill in the line
if filelines is not None:
line = filelines[lineno - 1]
stack[index] = (filename, lineno, name, line)
def entry_count(self):
""" Return the total number of entries collected in this profiler. """
return sum(len(collector.entries) for collector in self.collectors)
def format_path(self, path):
"""
Utility function to format a path for this profiler.
This is mainly useful to uniquify a path between executions.
"""
return path.format(
time=real_datetime_now().strftime("%Y%m%d-%H%M%S"),
len=self.entry_count(),
desc=re.sub("[^0-9a-zA-Z-]+", "_", self.description)
)
def json(self):
"""
Utility function to generate a json version of this profiler.
This is useful to write profiling entries into a file, such as::
with Profiler(db=None) as profiler:
do_stuff()
            filename = profiler.format_path('/home/foo/{desc}_{len}.json')
with open(filename, 'w') as f:
f.write(profiler.json())
"""
return json.dumps({
"name": self.description,
"session": self.profile_session,
"create_date": real_datetime_now().strftime("%Y%m%d-%H%M%S"),
"init_stack_trace": _format_stack(self.init_stack_trace),
"duration": self.duration,
"collectors": {collector.name: collector.entries for collector in self.collectors},
}, indent=4)
class Nested:
"""
Utility to nest another context manager inside a profiler.
The profiler should only be called directly in the "with" without nesting it
with ExitStack. If not, the retrieval of the 'init_frame' may be incorrect
and lead to an error "Limit frame was not found" when profiling. Since the
stack will ignore all stack frames inside this file, the nested frames will
be ignored, too. This is also why Nested() does not use
contextlib.contextmanager.
"""
def __init__(self, profiler, context_manager):
self.profiler = profiler
self.context_manager = context_manager
def __enter__(self):
self.profiler.__enter__()
return self.context_manager.__enter__()
def __exit__(self, exc_type, exc_value, traceback):
try:
return self.context_manager.__exit__(exc_type, exc_value, traceback)
finally:
self.profiler.__exit__(exc_type, exc_value, traceback)
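# Hedged usage sketch: profile a block without saving to a database and dump
# the collected entries to a file (do_stuff and the path are illustrative):
#
#     with Profiler(description='demo', db=None) as profiler:
#         do_stuff()
#     with open(profiler.format_path('/tmp/{desc}_{len}.json'), 'w') as f:
#         f.write(profiler.json())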
| 38.607813 | 24,709 |
2,092 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
import json as json_
import re
import markupsafe
JSON_SCRIPTSAFE_MAPPER = {
'&': r'\u0026',
'<': r'\u003c',
'>': r'\u003e',
'\u2028': r'\u2028',
'\u2029': r'\u2029'
}
class _ScriptSafe(str):
def __html__(self):
# replacement can be done straight in the serialised JSON as the
# problematic characters are not JSON metacharacters (and can thus
# only occur in strings)
return markupsafe.Markup(re.sub(
r'[<>&\u2028\u2029]',
lambda m: JSON_SCRIPTSAFE_MAPPER[m[0]],
self,
))
class JSON:
def loads(self, *args, **kwargs):
return json_.loads(*args, **kwargs)
def dumps(self, *args, **kwargs):
""" JSON used as JS in HTML (script tags) is problematic: <script>
tags are a special context which only waits for </script> but doesn't
interpret anything else, this means standard htmlescaping does not
        work (it breaks double quotes, and e.g. `<` would become `&lt;` *in
        the resulting JSON/JS*, not just inside the page).
        However, failing to escape embedded json means the json strings could
        contain `</script>` and thus become an XSS vector.
        The solution turns out to be very simple: use JSON-level unicode
        escapes for HTML-unsafe characters (e.g. "<" -> "\u003C"). This removes
the XSS issue without breaking the json, and there is no difference to
the end result once it's been parsed back from JSON. So it will work
properly even for HTML attributes or raw text.
Also handle U+2028 and U+2029 the same way just in case as these are
interpreted as newlines in javascript but not in JSON, which could
lead to oddities and issues.
.. warning::
except inside <script> elements, this should be escaped following
the normal rules of the containing format
Cf https://code.djangoproject.com/ticket/17419#comment:27
"""
return _ScriptSafe(json_.dumps(*args, **kwargs))
scriptsafe = JSON()
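# Example sketch: the payload stays plain JSON, but HTML-sensitive characters
# are escaped at the JSON level once rendered through __html__ (the markupsafe
# protocol used by templating engines):
#
#     >>> payload = scriptsafe.dumps({'tag': '</script>'})
#     >>> '</script>' in payload.__html__()
#     False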
| 38.036364 | 2,092 |
21,978 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import binascii
import io
from PIL import Image, ImageOps
# We can preload Ico too because it is considered safe
from PIL import IcoImagePlugin
try:
from PIL.Image import Transpose, Palette, Resampling
except ImportError:
Transpose = Palette = Resampling = Image
from random import randrange
from odoo.exceptions import UserError
from odoo.tools.translate import _
# Preload PIL with the minimal subset of image formats we need
Image.preinit()
Image._initialized = 2
# Maps only the 6 first bits of the base64 data, accurate enough
# for our purpose and faster than decoding the full blob first
FILETYPE_BASE64_MAGICWORD = {
b'/': 'jpg',
b'R': 'gif',
b'i': 'png',
b'P': 'svg+xml',
}
EXIF_TAG_ORIENTATION = 0x112
# The target is to have 1st row/col to be top/left
# Note: rotate is counterclockwise
EXIF_TAG_ORIENTATION_TO_TRANSPOSE_METHODS = { # Initial side on 1st row/col:
0: [], # reserved
1: [], # top/left
2: [Transpose.FLIP_LEFT_RIGHT], # top/right
3: [Transpose.ROTATE_180], # bottom/right
4: [Transpose.FLIP_TOP_BOTTOM], # bottom/left
5: [Transpose.FLIP_LEFT_RIGHT, Transpose.ROTATE_90],# left/top
6: [Transpose.ROTATE_270], # right/top
7: [Transpose.FLIP_TOP_BOTTOM, Transpose.ROTATE_90],# right/bottom
8: [Transpose.ROTATE_90], # left/bottom
}
# Arbitrary limit to fit most resolutions, including Samsung Galaxy A22 photo,
# 8K with a ratio up to 16:10, and almost all variants of 4320p
IMAGE_MAX_RESOLUTION = 50e6
class ImageProcess():
def __init__(self, base64_source, verify_resolution=True):
"""Initialize the `base64_source` image for processing.
:param base64_source: the original image base64 encoded
No processing will be done if the `base64_source` is falsy or if
the image is SVG.
:type base64_source: string or bytes
:param verify_resolution: if True, make sure the original image size is not
excessive before starting to process it. The max allowed resolution is
defined by `IMAGE_MAX_RESOLUTION`.
:type verify_resolution: bool
:return: self
:rtype: ImageProcess
:raise: ValueError if `verify_resolution` is True and the image is too large
:raise: UserError if the base64 is incorrect or the image can't be identified by PIL
"""
self.base64_source = base64_source or False
self.operationsCount = 0
if not base64_source or base64_source[:1] in (b'P', 'P'):
# don't process empty source or SVG
self.image = False
else:
self.image = base64_to_image(self.base64_source)
# Original format has to be saved before fixing the orientation or
# doing any other operations because the information will be lost on
# the resulting image.
self.original_format = (self.image.format or '').upper()
self.image = image_fix_orientation(self.image)
w, h = self.image.size
if verify_resolution and w * h > IMAGE_MAX_RESOLUTION:
raise UserError(_("Image size excessive, uploaded images must be smaller than %s million pixels.", str(IMAGE_MAX_RESOLUTION / 1e6)))
def image_quality(self, quality=0, output_format=''):
"""Return the image resulting of all the image processing
operations that have been applied previously.
Return False if the initialized `image` was falsy, and return
the initialized `image` without change if it was SVG.
Also return the initialized `image` if no operations have been applied
and the `output_format` is the same as the original format and the
quality is not specified.
:param quality: quality setting to apply. Default to 0.
- for JPEG: 1 is worse, 95 is best. Values above 95 should be
avoided. Falsy values will fallback to 95, but only if the image
was changed, otherwise the original image is returned.
- for PNG: set falsy to prevent conversion to a WEB palette.
- for other formats: no effect.
:type quality: int
:param output_format: the output format. Can be PNG, JPEG, GIF, or ICO.
Default to the format of the original image. BMP is converted to
PNG, other formats than those mentioned above are converted to JPEG.
:type output_format: string
:return: image
:rtype: bytes or False
"""
if not self.image:
return self.image
output_image = self.image
output_format = output_format.upper() or self.original_format
if output_format == 'BMP':
output_format = 'PNG'
elif output_format not in ['PNG', 'JPEG', 'GIF', 'ICO']:
output_format = 'JPEG'
if not self.operationsCount and output_format == self.original_format and not quality:
return self.image
opt = {'format': output_format}
if output_format == 'PNG':
opt['optimize'] = True
if quality:
if output_image.mode != 'P':
# Floyd Steinberg dithering by default
output_image = output_image.convert('RGBA').convert('P', palette=Palette.WEB, colors=256)
if output_format == 'JPEG':
opt['optimize'] = True
opt['quality'] = quality or 95
if output_format == 'GIF':
opt['optimize'] = True
opt['save_all'] = True
if output_image.mode not in ["1", "L", "P", "RGB", "RGBA"] or (output_format == 'JPEG' and output_image.mode == 'RGBA'):
output_image = output_image.convert("RGB")
return image_apply_opt(output_image, **opt)
# TODO: rename to image_quality_base64 in master~saas-15.1
def image_base64(self, quality=0, output_format=''):
"""Return the base64 encoded image resulting of all the image processing
operations that have been applied previously.
Return False if the initialized `base64_source` was falsy, and return
the initialized `base64_source` without change if it was SVG.
Also return the initialized `base64_source` if no operations have been
applied and the `output_format` is the same as the original format and
the quality is not specified.
:param quality: quality setting to apply. Default to 0.
- for JPEG: 1 is worse, 95 is best. Values above 95 should be
avoided. Falsy values will fallback to 95, but only if the image
was changed, otherwise the original image is returned.
- for PNG: set falsy to prevent conversion to a WEB palette.
- for other formats: no effect.
:type quality: int
:param output_format: the output format. Can be PNG, JPEG, GIF, or ICO.
Default to the format of the original image. BMP is converted to
PNG, other formats than those mentioned above are converted to JPEG.
:type output_format: string
:return: image base64 encoded or False
:rtype: bytes or False
"""
if not self.image:
return self.base64_source
stream = self.image_quality(quality=quality, output_format=output_format)
if stream != self.image:
return base64.b64encode(stream)
return self.base64_source
def resize(self, max_width=0, max_height=0):
"""Resize the image.
The image is never resized above the current image size. This method is
only to create a smaller version of the image.
The current ratio is preserved. To change the ratio, see `crop_resize`.
If `max_width` or `max_height` is falsy, it will be computed from the
other to keep the current ratio. If both are falsy, no resize is done.
It is currently not supported for GIF because we do not handle all the
frames properly.
:param max_width: max width
:type max_width: int
:param max_height: max height
:type max_height: int
:return: self to allow chaining
:rtype: ImageProcess
"""
if self.image and self.original_format != 'GIF' and (max_width or max_height):
w, h = self.image.size
asked_width = max_width or (w * max_height) // h
asked_height = max_height or (h * max_width) // w
if asked_width != w or asked_height != h:
self.image.thumbnail((asked_width, asked_height), Resampling.LANCZOS)
if self.image.width != w or self.image.height != h:
self.operationsCount += 1
return self
def crop_resize(self, max_width, max_height, center_x=0.5, center_y=0.5):
"""Crop and resize the image.
The image is never resized above the current image size. This method is
only to create smaller versions of the image.
Instead of preserving the ratio of the original image like `resize`,
this method will force the output to take the ratio of the given
`max_width` and `max_height`, so both have to be defined.
The crop is done before the resize in order to preserve as much of the
original image as possible. The goal of this method is primarily to
resize to a given ratio, and it is not to crop unwanted parts of the
original image. If the latter is what you want to do, you should create
another method, or directly use the `crop` method from PIL.
It is currently not supported for GIF because we do not handle all the
frames properly.
:param max_width: max width
:type max_width: int
:param max_height: max height
:type max_height: int
:param center_x: the center of the crop between 0 (left) and 1 (right)
Default to 0.5 (center).
:type center_x: float
:param center_y: the center of the crop between 0 (top) and 1 (bottom)
Default to 0.5 (center).
:type center_y: float
:return: self to allow chaining
:rtype: ImageProcess
"""
if self.image and self.original_format != 'GIF' and max_width and max_height:
w, h = self.image.size
# We want to keep as much of the image as possible -> at least one
# of the 2 crop dimensions always has to be the same value as the
# original image.
# The target size will be reached with the final resize.
if w / max_width > h / max_height:
new_w, new_h = w, (max_height * w) // max_width
else:
new_w, new_h = (max_width * h) // max_height, h
# No cropping above image size.
if new_w > w:
new_w, new_h = w, (new_h * w) // new_w
if new_h > h:
new_w, new_h = (new_w * h) // new_h, h
# Correctly place the center of the crop.
x_offset = int((w - new_w) * center_x)
h_offset = int((h - new_h) * center_y)
if new_w != w or new_h != h:
self.image = self.image.crop((x_offset, h_offset, x_offset + new_w, h_offset + new_h))
if self.image.width != w or self.image.height != h:
self.operationsCount += 1
return self.resize(max_width, max_height)
def colorize(self):
"""Replace the transparent background by a random color.
:return: self to allow chaining
:rtype: ImageProcess
"""
if self.image:
original = self.image
color = (randrange(32, 224, 24), randrange(32, 224, 24), randrange(32, 224, 24))
self.image = Image.new('RGB', original.size)
self.image.paste(color, box=(0, 0) + original.size)
self.image.paste(original, mask=original)
self.operationsCount += 1
return self
def image_process(base64_source, size=(0, 0), verify_resolution=False, quality=0, crop=None, colorize=False, output_format=''):
"""Process the `base64_source` image by executing the given operations and
return the result as a base64 encoded image.
"""
if not base64_source or ((not size or (not size[0] and not size[1])) and not verify_resolution and not quality and not crop and not colorize and not output_format):
# for performance: don't do anything if the image is falsy or if
# no operations have been requested
return base64_source
image = ImageProcess(base64_source, verify_resolution)
if size:
if crop:
center_x = 0.5
center_y = 0.5
if crop == 'top':
center_y = 0
elif crop == 'bottom':
center_y = 1
image.crop_resize(max_width=size[0], max_height=size[1], center_x=center_x, center_y=center_y)
else:
image.resize(max_width=size[0], max_height=size[1])
if colorize:
image.colorize()
return image.image_base64(quality=quality, output_format=output_format)
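# Illustrative usage sketch (not part of the original module): a typical call
# to image_process() to build a cropped JPEG thumbnail from a base64 payload.
# `source_b64` is assumed to be the base64-encoded bytes of a bitmap image.
def _example_image_process(source_b64):
    # 256x256 thumbnail, cropped towards the top of the image, re-encoded as
    # JPEG with quality 80
    return image_process(source_b64, size=(256, 256), crop='top',
                         quality=80, output_format='JPEG')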
# ----------------------------------------
# Misc image tools
# ----------------------------------------
def average_dominant_color(colors, mitigate=175, max_margin=140):
"""This function is used to calculate the dominant colors when given a list of colors
There are 5 steps :
1) Select dominant colors (highest count), isolate its values and remove
it from the current color set.
2) Set margins according to the prevalence of the dominant color.
3) Evaluate the colors. Similar colors are grouped in the dominant set
while others are put in the "remaining" list.
4) Calculate the average color for the dominant set. This is done by
averaging each band and joining them into a tuple.
5) Mitigate final average and convert it to hex
:param colors: list of tuples having:
[0] color count in the image
[1] actual color: tuple(R, G, B, A)
-> these can be extracted from a PIL image using image.getcolors()
:param mitigate: maximum value a band can reach
:param max_margin: maximum difference from one of the dominant values
:returns: a tuple with two items:
[0] the average color of the dominant set as: tuple(R, G, B)
[1] list of remaining colors, used to evaluate subsequent dominant colors
"""
dominant_color = max(colors)
dominant_rgb = dominant_color[1][:3]
dominant_set = [dominant_color]
remaining = []
margins = [max_margin * (1 - dominant_color[0] /
sum([col[0] for col in colors]))] * 3
colors.remove(dominant_color)
for color in colors:
rgb = color[1]
if (rgb[0] < dominant_rgb[0] + margins[0] and rgb[0] > dominant_rgb[0] - margins[0] and
rgb[1] < dominant_rgb[1] + margins[1] and rgb[1] > dominant_rgb[1] - margins[1] and
rgb[2] < dominant_rgb[2] + margins[2] and rgb[2] > dominant_rgb[2] - margins[2]):
dominant_set.append(color)
else:
remaining.append(color)
dominant_avg = []
for band in range(3):
avg = total = 0
for color in dominant_set:
avg += color[0] * color[1][band]
total += color[0]
dominant_avg.append(int(avg / total))
final_dominant = []
brightest = max(dominant_avg)
for color in range(3):
value = dominant_avg[color] / (brightest / mitigate) if brightest > mitigate else dominant_avg[color]
final_dominant.append(int(value))
return tuple(final_dominant), remaining
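# Illustrative usage sketch (not part of the original module): feeding
# average_dominant_color() with the color histogram of a PIL image, as hinted
# in its docstring. Passing width * height as maxcolors guarantees that
# getcolors() does not return None.
def _example_average_dominant_color(image):
    colors = image.convert('RGBA').getcolors(maxcolors=image.width * image.height)
    dominant, remaining = average_dominant_color(colors)
    return dominant  # (R, G, B) tuple averaged over the dominant color set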
def image_fix_orientation(image):
"""Fix the orientation of the image if it has an EXIF orientation tag.
This typically happens for images taken from a non-standard orientation
by some phones or other devices that are able to report orientation.
The specified transposition is applied to the image before all other
operations, because all of them expect the image to be in its final
orientation, which is the case only when the first row of pixels is the top
of the image and the first column of pixels is the left of the image.
Moreover the EXIF tags will not be kept when the image is later saved, so
the transposition has to be done to ensure the final image is correctly
orientated.
Note: to be completely correct, the resulting image should have its exif
orientation tag removed, since the transpositions have been applied.
However since this tag is not used in the code, it is acceptable to
save the complexity of removing it.
:param image: the source image
:type image: PIL.Image
:return: the resulting image, copy of the source, with orientation fixed
or the source image if no operation was applied
:rtype: PIL.Image
"""
getexif = getattr(image, 'getexif', None) or getattr(image, '_getexif', None) # support PIL < 6.0
if getexif:
exif = getexif()
if exif:
orientation = exif.get(EXIF_TAG_ORIENTATION, 0)
for method in EXIF_TAG_ORIENTATION_TO_TRANSPOSE_METHODS.get(orientation, []):
image = image.transpose(method)
return image
return image
def base64_to_image(base64_source):
"""Return a PIL image from the given `base64_source`.
:param base64_source: the image base64 encoded
:type base64_source: string or bytes
:return: the PIL image
:rtype: PIL.Image
:raise: UserError if the base64 is incorrect or the image can't be identified by PIL
"""
try:
return Image.open(io.BytesIO(base64.b64decode(base64_source)))
except (OSError, binascii.Error):
raise UserError(_("This file could not be decoded as an image file. Please try with a different file."))
def image_apply_opt(image, format, **params):
"""Return the given PIL `image` using `params`.
:param image: the PIL image
:type image: PIL.Image
:param params: params to expand when calling PIL.Image.save()
:type params: dict
:return: the image formatted
:rtype: bytes
"""
if format == 'JPEG' and image.mode not in ['1', 'L', 'RGB']:
image = image.convert("RGB")
stream = io.BytesIO()
image.save(stream, format=format, **params)
return stream.getvalue()
def image_to_base64(image, format, **params):
"""Return a base64_image from the given PIL `image` using `params`.
:param image: the PIL image
:type image: PIL.Image
:param params: params to expand when calling PIL.Image.save()
:type params: dict
:return: the image base64 encoded
:rtype: bytes
"""
stream = image_apply_opt(image, format, **params)
return base64.b64encode(stream)
def is_image_size_above(base64_source_1, base64_source_2):
"""Return whether or not the size of the given image `base64_source_1` is
above the size of the given image `base64_source_2`.
"""
if not base64_source_1 or not base64_source_2:
return False
if base64_source_1[:1] in (b'P', 'P') or base64_source_2[:1] in (b'P', 'P'):
# False for SVG
return False
image_source = image_fix_orientation(base64_to_image(base64_source_1))
image_target = image_fix_orientation(base64_to_image(base64_source_2))
return image_source.width > image_target.width or image_source.height > image_target.height
def image_guess_size_from_field_name(field_name):
"""Attempt to guess the image size based on `field_name`.
If it can't be guessed or if it is a custom field: return (0, 0) instead.
:param str field_name: the name of a field
:return: the guessed size
:rtype: tuple (width, height)
"""
if field_name == 'image':
return (1024, 1024)
if field_name.startswith('x_'):
return (0, 0)
try:
suffix = int(field_name.split('_')[-1])
except ValueError:
return (0, 0)
if suffix < 16:
# If the suffix is less than 16, it's probably not the size
return (0, 0)
return (suffix, suffix)
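# Illustrative usage sketch (not part of the original module): the size is
# guessed from the numeric suffix of the standard image field names, while
# custom (x_) or non-numeric suffixes fall back to (0, 0).
def _example_guess_size():
    assert image_guess_size_from_field_name('image_128') == (128, 128)
    assert image_guess_size_from_field_name('image_small') == (0, 0)
    assert image_guess_size_from_field_name('x_studio_image') == (0, 0)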
def image_data_uri(base64_source):
"""This returns data URL scheme according RFC 2397
(https://tools.ietf.org/html/rfc2397) for all kind of supported images
(PNG, GIF, JPG and SVG), defaulting on PNG type if not mimetype detected.
"""
return 'data:image/%s;base64,%s' % (
FILETYPE_BASE64_MAGICWORD.get(base64_source[:1], 'png'),
base64_source.decode(),
)
def get_saturation(rgb):
"""Returns the saturation (hsl format) of a given rgb color
:param rgb: rgb tuple or list
:return: saturation
"""
c_max = max(rgb) / 255
c_min = min(rgb) / 255
d = c_max - c_min
return 0 if d == 0 else d / (1 - abs(c_max + c_min - 1))
def get_lightness(rgb):
"""Returns the lightness (hsl format) of a given rgb color
:param rgb: rgb tuple or list
:return: lightness
"""
return (max(rgb) + min(rgb)) / 2 / 255
def hex_to_rgb(hx):
"""Converts an hexadecimal string (starting with '#') to a RGB tuple"""
return tuple([int(hx[i:i+2], 16) for i in range(1, 6, 2)])
def rgb_to_hex(rgb):
"""Converts a RGB tuple or list to an hexadecimal string"""
return '#' + ''.join([(hex(c).split('x')[-1].zfill(2)) for c in rgb])
if __name__ == "__main__":
    import sys
    assert len(sys.argv) == 3, 'Usage to Test: image.py SRC.png DEST.png'
    img = base64.b64encode(open(sys.argv[1], 'rb').read())
    new = image_process(img, size=(128, 100))
    open(sys.argv[2], 'wb').write(base64.b64decode(new))
| 38.423077 | 21,978 |
9,790 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
__all__ = ['synchronized', 'lazy_classproperty', 'lazy_property',
'classproperty', 'conditional', 'lazy']
from functools import wraps
from inspect import getsourcefile
from json import JSONEncoder
class lazy_property(object):
""" Decorator for a lazy property of an object, i.e., an object attribute
that is determined by the result of a method call evaluated once. To
reevaluate the property, simply delete the attribute on the object, and
get it again.
"""
def __init__(self, fget):
assert not fget.__name__.startswith('__'),\
"lazy_property does not support mangled names"
self.fget = fget
def __get__(self, obj, cls):
if obj is None:
return self
value = self.fget(obj)
setattr(obj, self.fget.__name__, value)
return value
@property
def __doc__(self):
return self.fget.__doc__
@staticmethod
def reset_all(obj):
""" Reset all lazy properties on the instance `obj`. """
cls = type(obj)
obj_dict = vars(obj)
for name in list(obj_dict):
if isinstance(getattr(cls, name, None), lazy_property):
obj_dict.pop(name)
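# Illustrative usage sketch (not part of the original module): the property is
# computed on first access, then stored on the instance so later accesses are
# plain attribute lookups; deleting the attribute forces recomputation.
class _ExampleReport(object):
    @lazy_property
    def total(self):
        return sum(range(1000))  # "expensive" computation, done at most once
# report = _ExampleReport(); report.total  -> 499500, computed once
# del report.total                         -> next access recomputes it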
class lazy_classproperty(lazy_property):
""" Similar to :class:`lazy_property`, but for classes. """
def __get__(self, obj, cls):
val = self.fget(cls)
setattr(cls, self.fget.__name__, val)
return val
def conditional(condition, decorator):
""" Decorator for a conditionally applied decorator.
Example:
@conditional(get_config('use_cache'), ormcache)
def fn():
pass
"""
if condition:
return decorator
else:
return lambda fn: fn
def synchronized(lock_attr='_lock'):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
lock = getattr(self, lock_attr)
try:
lock.acquire()
return func(self, *args, **kwargs)
finally:
lock.release()
return wrapper
return decorator
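# Illustrative usage sketch (not part of the original module): the decorated
# method runs with self._lock held, which is the attribute name synchronized()
# looks up by default.
import threading

class _ExampleCounter(object):
    def __init__(self):
        self._lock = threading.Lock()
        self.value = 0

    @synchronized()
    def increment(self):
        self.value += 1  # executed while holding self._lock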
def frame_codeinfo(fframe, back=0):
""" Return a (filename, line) pair for a previous frame .
@return (filename, lineno) where lineno is either int or string==''
"""
try:
if not fframe:
return "<unknown>", ''
for i in range(back):
fframe = fframe.f_back
try:
fname = getsourcefile(fframe)
except TypeError:
fname = '<builtin>'
lineno = fframe.f_lineno or ''
return fname, lineno
except Exception:
return "<unknown>", ''
def compose(a, b):
""" Composes the callables ``a`` and ``b``. ``compose(a, b)(*args)`` is
equivalent to ``a(b(*args))``.
Can be used as a decorator by partially applying ``a``::
@partial(compose, a)
def b():
...
"""
@wraps(b)
def wrapper(*args, **kwargs):
return a(b(*args, **kwargs))
return wrapper
class _ClassProperty(property):
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
def classproperty(func):
return _ClassProperty(classmethod(func))
class lazy(object):
""" A proxy to the (memoized) result of a lazy evaluation::
foo = lazy(func, arg) # func(arg) is not called yet
bar = foo + 1 # eval func(arg) and add 1
baz = foo + 2 # use result of func(arg) and add 2
"""
__slots__ = ['_func', '_args', '_kwargs', '_cached_value']
def __init__(self, func, *args, **kwargs):
# bypass own __setattr__
object.__setattr__(self, '_func', func)
object.__setattr__(self, '_args', args)
object.__setattr__(self, '_kwargs', kwargs)
@property
def _value(self):
if self._func is not None:
value = self._func(*self._args, **self._kwargs)
object.__setattr__(self, '_func', None)
object.__setattr__(self, '_args', None)
object.__setattr__(self, '_kwargs', None)
object.__setattr__(self, '_cached_value', value)
return self._cached_value
def __getattr__(self, name): return getattr(self._value, name)
def __setattr__(self, name, value): return setattr(self._value, name, value)
def __delattr__(self, name): return delattr(self._value, name)
def __repr__(self):
return repr(self._value) if self._func is None else object.__repr__(self)
def __str__(self): return str(self._value)
def __bytes__(self): return bytes(self._value)
def __format__(self, format_spec): return format(self._value, format_spec)
def __lt__(self, other): return self._value < other
def __le__(self, other): return self._value <= other
def __eq__(self, other): return self._value == other
def __ne__(self, other): return self._value != other
def __gt__(self, other): return self._value > other
def __ge__(self, other): return self._value >= other
def __hash__(self): return hash(self._value)
def __bool__(self): return bool(self._value)
def __call__(self, *args, **kwargs): return self._value(*args, **kwargs)
def __len__(self): return len(self._value)
def __getitem__(self, key): return self._value[key]
def __missing__(self, key): return self._value.__missing__(key)
def __setitem__(self, key, value): self._value[key] = value
def __delitem__(self, key): del self._value[key]
def __iter__(self): return iter(self._value)
def __reversed__(self): return reversed(self._value)
def __contains__(self, key): return key in self._value
def __add__(self, other): return self._value.__add__(other)
def __sub__(self, other): return self._value.__sub__(other)
def __mul__(self, other): return self._value.__mul__(other)
def __matmul__(self, other): return self._value.__matmul__(other)
def __truediv__(self, other): return self._value.__truediv__(other)
def __floordiv__(self, other): return self._value.__floordiv__(other)
def __mod__(self, other): return self._value.__mod__(other)
def __divmod__(self, other): return self._value.__divmod__(other)
def __pow__(self, other): return self._value.__pow__(other)
def __lshift__(self, other): return self._value.__lshift__(other)
def __rshift__(self, other): return self._value.__rshift__(other)
def __and__(self, other): return self._value.__and__(other)
def __xor__(self, other): return self._value.__xor__(other)
def __or__(self, other): return self._value.__or__(other)
def __radd__(self, other): return self._value.__radd__(other)
def __rsub__(self, other): return self._value.__rsub__(other)
def __rmul__(self, other): return self._value.__rmul__(other)
def __rmatmul__(self, other): return self._value.__rmatmul__(other)
def __rtruediv__(self, other): return self._value.__rtruediv__(other)
def __rfloordiv__(self, other): return self._value.__rfloordiv__(other)
def __rmod__(self, other): return self._value.__rmod__(other)
def __rdivmod__(self, other): return self._value.__rdivmod__(other)
def __rpow__(self, other): return self._value.__rpow__(other)
def __rlshift__(self, other): return self._value.__rlshift__(other)
def __rrshift__(self, other): return self._value.__rrshift__(other)
def __rand__(self, other): return self._value.__rand__(other)
def __rxor__(self, other): return self._value.__rxor__(other)
def __ror__(self, other): return self._value.__ror__(other)
def __iadd__(self, other): return self._value.__iadd__(other)
def __isub__(self, other): return self._value.__isub__(other)
def __imul__(self, other): return self._value.__imul__(other)
def __imatmul__(self, other): return self._value.__imatmul__(other)
def __itruediv__(self, other): return self._value.__itruediv__(other)
def __ifloordiv__(self, other): return self._value.__ifloordiv__(other)
def __imod__(self, other): return self._value.__imod__(other)
def __ipow__(self, other): return self._value.__ipow__(other)
def __ilshift__(self, other): return self._value.__ilshift__(other)
def __irshift__(self, other): return self._value.__irshift__(other)
def __iand__(self, other): return self._value.__iand__(other)
def __ixor__(self, other): return self._value.__ixor__(other)
def __ior__(self, other): return self._value.__ior__(other)
def __neg__(self): return self._value.__neg__()
def __pos__(self): return self._value.__pos__()
def __abs__(self): return self._value.__abs__()
def __invert__(self): return self._value.__invert__()
def __complex__(self): return complex(self._value)
def __int__(self): return int(self._value)
def __float__(self): return float(self._value)
def __index__(self): return self._value.__index__()
def __round__(self): return self._value.__round__()
def __trunc__(self): return self._value.__trunc__()
def __floor__(self): return self._value.__floor__()
def __ceil__(self): return self._value.__ceil__()
def __enter__(self): return self._value.__enter__()
def __exit__(self, exc_type, exc_value, traceback):
return self._value.__exit__(exc_type, exc_value, traceback)
def __await__(self): return self._value.__await__()
def __aiter__(self): return self._value.__aiter__()
def __anext__(self): return self._value.__anext__()
def __aenter__(self): return self._value.__aenter__()
def __aexit__(self, exc_type, exc_value, traceback):
return self._value.__aexit__(exc_type, exc_value, traceback)
| 39.796748 | 9,790 |
8,117 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
r"""
Vendored copy of https://github.com/pallets/werkzeug/blob/2b2c4c3dd3cf7389e9f4aa06371b7332257c6289/src/werkzeug/contrib/sessions.py
werkzeug.contrib was removed from werkzeug 1.0. sessions (and secure
cookies) were moved to the secure-cookies package. Problem is distros
are starting to update werkzeug to 1.0 without having secure-cookies
(e.g. Arch has done so, Debian has updated python-werkzeug in
"experimental"), which will be problematic once that starts trickling
down onto more stable distros and people start deploying that.
Edited some to fix imports and remove some compatibility things
(mostly PY2) and the unnecessary (to us) SessionMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import os
import re
import tempfile
from hashlib import sha1
from os import path, replace as rename
from pickle import dump
from pickle import HIGHEST_PROTOCOL
from pickle import load
from time import time
from werkzeug.datastructures import CallbackDict
_sha1_re = re.compile(r"^[a-f0-9]{40}$")
def generate_key(salt=None):
if salt is None:
salt = repr(salt).encode("ascii")
return sha1(b"".join([salt, str(time()).encode("ascii"), os.urandom(30)])).hexdigest()
class ModificationTrackingDict(CallbackDict):
__slots__ = ("modified",)
def __init__(self, *args, **kwargs):
def on_update(self):
self.modified = True
self.modified = False
CallbackDict.__init__(self, on_update=on_update)
dict.update(self, *args, **kwargs)
def copy(self):
"""Create a flat copy of the dict."""
missing = object()
result = object.__new__(self.__class__)
for name in self.__slots__:
val = getattr(self, name, missing)
if val is not missing:
setattr(result, name, val)
return result
def __copy__(self):
return self.copy()
class Session(ModificationTrackingDict):
"""Subclass of a dict that keeps track of direct object changes. Changes
in mutable structures are not tracked, for those you have to set
`modified` to `True` by hand.
"""
__slots__ = ModificationTrackingDict.__slots__ + ("sid", "new")
def __init__(self, data, sid, new=False):
ModificationTrackingDict.__init__(self, data)
self.sid = sid
self.new = new
def __repr__(self):
return "<%s %s%s>" % (
self.__class__.__name__,
dict.__repr__(self),
"*" if self.should_save else "",
)
@property
def should_save(self):
"""True if the session should be saved.
.. versionchanged:: 0.6
By default the session is now only saved if the session is
modified, not if it is new like it was before.
"""
return self.modified
class SessionStore(object):
"""Baseclass for all session stores. The Werkzeug contrib module does not
implement any useful stores besides the filesystem store; application
developers are encouraged to create their own stores.
:param session_class: The session class to use. Defaults to
:class:`Session`.
"""
def __init__(self, session_class=None):
if session_class is None:
session_class = Session
self.session_class = session_class
def is_valid_key(self, key):
"""Check if a key has the correct format."""
return _sha1_re.match(key) is not None
def generate_key(self, salt=None):
"""Simple function that generates a new session key."""
return generate_key(salt)
def new(self):
"""Generate a new session."""
return self.session_class({}, self.generate_key(), True)
def save(self, session):
"""Save a session."""
def save_if_modified(self, session):
"""Save if a session class wants an update."""
if session.should_save:
self.save(session)
def delete(self, session):
"""Delete a session."""
def get(self, sid):
"""Get a session for this sid or a new session object. This method
has to check if the session key is valid and create a new session if
that wasn't the case.
"""
return self.session_class({}, sid, True)
#: used for temporary files by the filesystem session store
_fs_transaction_suffix = ".__wz_sess"
class FilesystemSessionStore(SessionStore):
"""Simple example session store that saves sessions on the filesystem.
This store works best on POSIX systems and Windows Vista / Windows
Server 2008 and newer.
.. versionchanged:: 0.6
`renew_missing` was added. Previously this was considered `True`,
now the default changed to `False` and it can be explicitly
deactivated.
:param path: the path to the folder used for storing the sessions.
If not provided the default temporary directory is used.
:param filename_template: a string template used to give the session
a filename. ``%s`` is replaced with the
session id.
:param session_class: The session class to use. Defaults to
:class:`Session`.
:param renew_missing: set to `True` if you want the store to
give the user a new sid if the session was
not yet saved.
"""
def __init__(
self,
path=None,
filename_template="werkzeug_%s.sess",
session_class=None,
renew_missing=False,
mode=0o644,
):
SessionStore.__init__(self, session_class)
if path is None:
path = tempfile.gettempdir()
self.path = path
assert not filename_template.endswith(_fs_transaction_suffix), (
"filename templates may not end with %s" % _fs_transaction_suffix
)
self.filename_template = filename_template
self.renew_missing = renew_missing
self.mode = mode
def get_session_filename(self, sid):
# out of the box, this should be a strict ASCII subset but
# you might reconfigure the session object to have a more
# arbitrary string.
return path.join(self.path, self.filename_template % sid)
def save(self, session):
fn = self.get_session_filename(session.sid)
fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix, dir=self.path)
f = os.fdopen(fd, "wb")
try:
dump(dict(session), f, HIGHEST_PROTOCOL)
finally:
f.close()
try:
rename(tmp, fn)
os.chmod(fn, self.mode)
except (IOError, OSError):
pass
def delete(self, session):
fn = self.get_session_filename(session.sid)
try:
os.unlink(fn)
except OSError:
pass
def get(self, sid):
if not self.is_valid_key(sid):
return self.new()
try:
f = open(self.get_session_filename(sid), "rb")
except IOError:
if self.renew_missing:
return self.new()
data = {}
else:
try:
try:
data = load(f)
except Exception:
data = {}
finally:
f.close()
return self.session_class(data, sid, False)
def list(self):
"""Lists all sessions in the store.
.. versionadded:: 0.6
"""
before, after = self.filename_template.split("%s", 1)
filename_re = re.compile(
r"%s(.{5,})%s$" % (re.escape(before), re.escape(after))
)
result = []
for filename in os.listdir(self.path):
#: this is a session that is still being saved.
if filename.endswith(_fs_transaction_suffix):
continue
match = filename_re.match(filename)
if match is not None:
result.append(match.group(1))
return result
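# Illustrative usage sketch (not part of the original module): a minimal round
# trip through the filesystem store; sessions are pickled under the configured
# path (the system temp dir by default) and looked up again by their sid.
def _example_session_round_trip():
    store = FilesystemSessionStore()
    session = store.new()
    session['uid'] = 42                    # marks the session as modified
    store.save_if_modified(session)
    value = store.get(session.sid)['uid']  # -> 42
    store.delete(session)                  # clean up the pickled file
    return value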
| 32.729839 | 8,117 |
5,528 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
"""
werkzeug.useragents
~~~~~~~~~~~~~~~~~~~
This module provides a helper to inspect user agent strings. This module
is far from complete but should work for most of the currently available
browsers.
:copyright: 2007 Pallets
:license: BSD-3-Clause
This package was vendored in odoo in order to prevent errors with werkzeug 2.1
"""
import re
class UserAgentParser(object):
"""A simple user agent parser. Used by the `UserAgent`."""
platforms = (
("cros", "chromeos"),
("iphone|ios", "iphone"),
("ipad", "ipad"),
(r"darwin|mac|os\s*x", "macos"),
("win", "windows"),
(r"android", "android"),
("netbsd", "netbsd"),
("openbsd", "openbsd"),
("freebsd", "freebsd"),
("dragonfly", "dragonflybsd"),
("(sun|i86)os", "solaris"),
(r"x11|lin(\b|ux)?", "linux"),
(r"nintendo\s+wii", "wii"),
("irix", "irix"),
("hp-?ux", "hpux"),
("aix", "aix"),
("sco|unix_sv", "sco"),
("bsd", "bsd"),
("amiga", "amiga"),
("blackberry|playbook", "blackberry"),
("symbian", "symbian"),
)
browsers = (
("googlebot", "google"),
("msnbot", "msn"),
("yahoo", "yahoo"),
("ask jeeves", "ask"),
(r"aol|america\s+online\s+browser", "aol"),
("opera", "opera"),
("edge", "edge"),
("chrome|crios", "chrome"),
("seamonkey", "seamonkey"),
("firefox|firebird|phoenix|iceweasel", "firefox"),
("galeon", "galeon"),
("safari|version", "safari"),
("webkit", "webkit"),
("camino", "camino"),
("konqueror", "konqueror"),
("k-meleon", "kmeleon"),
("netscape", "netscape"),
(r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"),
("lynx", "lynx"),
("links", "links"),
("Baiduspider", "baidu"),
("bingbot", "bing"),
("mozilla", "mozilla"),
)
_browser_version_re = r"(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?"
_language_re = re.compile(
r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|"
r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)"
)
def __init__(self):
self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
self.browsers = [
(b, re.compile(self._browser_version_re % a, re.I))
for a, b in self.browsers
]
def __call__(self, user_agent):
for platform, regex in self.platforms: # noqa: B007
match = regex.search(user_agent)
if match is not None:
break
else:
platform = None
for browser, regex in self.browsers: # noqa: B007
match = regex.search(user_agent)
if match is not None:
version = match.group(1)
break
else:
browser = version = None
match = self._language_re.search(user_agent)
if match is not None:
language = match.group(1) or match.group(2)
else:
language = None
return platform, browser, version, language
class UserAgent(object):
"""Represents a user agent. Pass it a WSGI environment or a user agent
string and you can inspect some of the details from the user agent
string via the attributes. The following attributes exist:
.. attribute:: string
the raw user agent string
.. attribute:: platform
the browser platform. The following platforms are currently
recognized:
- `aix`
- `amiga`
- `android`
- `blackberry`
- `bsd`
- `chromeos`
- `dragonflybsd`
- `freebsd`
- `hpux`
- `ipad`
- `iphone`
- `irix`
- `linux`
- `macos`
- `netbsd`
- `openbsd`
- `sco`
- `solaris`
- `symbian`
- `wii`
- `windows`
.. attribute:: browser
the name of the browser. The following browsers are currently
recognized:
- `aol` *
- `ask` *
- `baidu` *
- `bing` *
- `camino`
- `chrome`
- `edge`
- `firefox`
- `galeon`
- `google` *
- `kmeleon`
- `konqueror`
- `links`
- `lynx`
- `mozilla`
- `msie`
- `msn`
- `netscape`
- `opera`
- `safari`
- `seamonkey`
- `webkit`
- `yahoo` *
(Browsers marked with a star (``*``) are crawlers.)
.. attribute:: version
the version of the browser
.. attribute:: language
the language of the browser
"""
_parser = UserAgentParser()
def __init__(self, environ_or_string):
if isinstance(environ_or_string, dict):
environ_or_string = environ_or_string.get("HTTP_USER_AGENT", "")
self.string = environ_or_string
self.platform, self.browser, self.version, self.language = self._parser(
environ_or_string
)
def to_header(self):
return self.string
def __str__(self):
return self.string
def __nonzero__(self):
return bool(self.browser)
__bool__ = __nonzero__
def __repr__(self):
return "<%s %r/%s>" % (self.__class__.__name__, self.browser, self.version)
| 27.098039 | 5,528 |
9,953 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
import warnings
from zlib import crc32
from odoo.tools import lazy_property
IDENT_RE = re.compile(r'^[a-z_][a-z0-9_$]*$', re.I)
def _from_table(table, alias):
""" Return a FROM clause element from ``table`` and ``alias``. """
if alias == table:
return f'"{alias}"'
elif IDENT_RE.match(table):
return f'"{table}" AS "{alias}"'
else:
return f'({table}) AS "{alias}"'
def _generate_table_alias(src_table_alias, link):
""" Generate a standard table alias name. An alias is generated as following:
- the base is the source table name (that can already be an alias)
- then, the joined table is added in the alias using a 'link field name'
that is used to render unique aliases for a given path
- the name is shortcut if it goes beyond PostgreSQL's identifier limits
Examples:
- src_table_alias='res_users', link='parent_id'
alias = 'res_users__parent_id'
:param str src_table_alias: alias of the source table
:param str link: field name
:return str: alias
"""
alias = "%s__%s" % (src_table_alias, link)
# Use an alternate alias scheme if length exceeds the PostgreSQL limit
# of 63 characters.
if len(alias) >= 64:
# We have to fit a crc32 hash and one underscore into a 63 character
# alias. The remaining space we can use to add a human readable prefix.
alias = "%s_%08x" % (alias[:54], crc32(alias.encode('utf-8')))
return alias
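# Illustrative sketch (not part of the original module): short chains keep the
# readable "<alias>__<link>" form, while overly long ones fall back to the
# truncated prefix + crc32 form so the alias stays below PostgreSQL's
# 63-character identifier limit.
def _example_generate_table_alias():
    assert _generate_table_alias('res_users', 'parent_id') == 'res_users__parent_id'
    long_alias = _generate_table_alias('res_users__' + 'x' * 60, 'parent_id')
    assert len(long_alias) == 63
    return long_alias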
class Query(object):
""" Simple implementation of a query object, managing tables with aliases,
join clauses (with aliases, condition and parameters), where clauses (with
parameters), order, limit and offset.
:param cr: database cursor (for lazy evaluation)
:param alias: name or alias of the table
:param table: if given, a table expression (identifier or query)
"""
def __init__(self, cr, alias, table=None):
# database cursor
self._cr = cr
# tables {alias: table}
self._tables = {alias: table or alias}
# joins {alias: (kind, table, condition, condition_params)}
self._joins = {}
# holds the list of WHERE clause elements (to be joined with 'AND'), and
# the list of parameters
self._where_clauses = []
self._where_params = []
# order, limit, offset
self.order = None
self.limit = None
self.offset = None
def add_table(self, alias, table=None):
""" Add a table with a given alias to the from clause. """
assert alias not in self._tables and alias not in self._joins, "Alias %r already in %s" % (alias, str(self))
self._tables[alias] = table or alias
def add_where(self, where_clause, where_params=()):
""" Add a condition to the where clause. """
self._where_clauses.append(where_clause)
self._where_params.extend(where_params)
def join(self, lhs_alias, lhs_column, rhs_table, rhs_column, link, extra=None, extra_params=()):
"""
Perform a join between a table already present in the current Query object and
another table.
:param str lhs_alias: alias of a table already defined in the current Query object.
:param str lhs_column: column of `lhs_alias` to be used for the join's ON condition.
:param str rhs_table: name of the table to join to `lhs_alias`.
:param str rhs_column: column of `rhs_alias` to be used for the join's ON condition.
:param str link: used to generate the alias for the joined table, this string should
represent the relationship (the link) between both tables.
:param str extra: an sql string of a predicate or series of predicates to append to the
join's ON condition, `lhs_alias` and `rhs_alias` can be injected if the string uses
the `lhs` and `rhs` variables with the `str.format` syntax. e.g.::
query.join(..., extra="{lhs}.name != {rhs}.name OR ...", ...)
:param tuple extra_params: a tuple of values to be interpolated into `extra`, this is
done by psycopg2.
Full example:
>>> rhs_alias = query.join(
... "res_users",
... "partner_id",
... "res_partner",
... "id",
... "partner_id", # partner_id is the "link" from res_users to res_partner
... "{lhs}.\"name\" != %s",
... ("Mitchell Admin",),
... )
>>> rhs_alias
res_users_res_partner__partner_id
From the example above, the resulting query would be something like::
SELECT ...
FROM "res_users" AS "res_users"
JOIN "res_partner" AS "res_users_res_partner__partner_id"
ON "res_users"."partner_id" = "res_users_res_partner__partner_id"."id"
AND "res_users"."name" != 'Mitchell Admin'
WHERE ...
"""
return self._join('JOIN', lhs_alias, lhs_column, rhs_table, rhs_column, link, extra, extra_params)
def left_join(self, lhs_alias, lhs_column, rhs_table, rhs_column, link, extra=None, extra_params=()):
""" Add a LEFT JOIN to the current table (if necessary), and return the
alias corresponding to ``rhs_table``.
See the documentation of :meth:`~odoo.osv.query.Query.join` for a better overview of the
arguments and what they do.
"""
return self._join('LEFT JOIN', lhs_alias, lhs_column, rhs_table, rhs_column, link, extra, extra_params)
def _join(self, kind, lhs_alias, lhs_column, rhs_table, rhs_column, link, extra=None, extra_params=()):
assert lhs_alias in self._tables or lhs_alias in self._joins, "Alias %r not in %s" % (lhs_alias, str(self))
rhs_alias = _generate_table_alias(lhs_alias, link)
assert rhs_alias not in self._tables, "Alias %r already in %s" % (rhs_alias, str(self))
if rhs_alias not in self._joins:
condition = f'"{lhs_alias}"."{lhs_column}" = "{rhs_alias}"."{rhs_column}"'
condition_params = []
if extra:
condition = condition + " AND " + extra.format(lhs=lhs_alias, rhs=rhs_alias)
condition_params = list(extra_params)
if kind:
self._joins[rhs_alias] = (kind, rhs_table, condition, condition_params)
else:
self._tables[rhs_alias] = rhs_table
self.add_where(condition, condition_params)
return rhs_alias
def select(self, *args):
""" Return the SELECT query as a pair ``(query_string, query_params)``. """
from_clause, where_clause, params = self.get_sql()
query_str = 'SELECT {} FROM {} WHERE {}{}{}{}'.format(
", ".join(args or [f'"{next(iter(self._tables))}".id']),
from_clause,
where_clause or "TRUE",
(" ORDER BY %s" % self.order) if self.order else "",
(" LIMIT %d" % self.limit) if self.limit else "",
(" OFFSET %d" % self.offset) if self.offset else "",
)
return query_str, params
def subselect(self, *args):
""" Similar to :meth:`.select`, but for sub-queries.
This one avoids the ORDER BY clause when possible.
"""
if self.limit or self.offset:
# in this case, the ORDER BY clause is necessary
return self.select(*args)
from_clause, where_clause, params = self.get_sql()
query_str = 'SELECT {} FROM {} WHERE {}'.format(
", ".join(args or [f'"{next(iter(self._tables))}".id']),
from_clause,
where_clause or "TRUE",
)
return query_str, params
def get_sql(self):
""" Returns (query_from, query_where, query_params). """
tables = [_from_table(table, alias) for alias, table in self._tables.items()]
joins = []
params = []
for alias, (kind, table, condition, condition_params) in self._joins.items():
joins.append(f'{kind} {_from_table(table, alias)} ON ({condition})')
params.extend(condition_params)
from_clause = " ".join([", ".join(tables)] + joins)
where_clause = " AND ".join(self._where_clauses)
return from_clause, where_clause, params + self._where_params
@lazy_property
def _result(self):
query_str, params = self.select()
self._cr.execute(query_str, params)
return [row[0] for row in self._cr.fetchall()]
def __str__(self):
return '<osv.Query: %r with params: %r>' % self.select()
def __bool__(self):
return bool(self._result)
def __len__(self):
return len(self._result)
def __iter__(self):
return iter(self._result)
#
# deprecated attributes and methods
#
@property
def tables(self):
warnings.warn("deprecated Query.tables, use Query.get_sql() instead",
DeprecationWarning)
return tuple(_from_table(table, alias) for alias, table in self._tables.items())
@property
def where_clause(self):
return tuple(self._where_clauses)
@property
def where_clause_params(self):
return tuple(self._where_params)
def add_join(self, connection, implicit=True, outer=False, extra=None, extra_params=()):
warnings.warn("deprecated Query.add_join, use Query.join() or Query.left_join() instead",
DeprecationWarning)
lhs_alias, rhs_table, lhs_column, rhs_column, link = connection
kind = '' if implicit else ('LEFT JOIN' if outer else 'JOIN')
rhs_alias = self._join(kind, lhs_alias, lhs_column, rhs_table, rhs_column, link, extra, extra_params)
return rhs_alias, _from_table(rhs_table, rhs_alias)
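# Illustrative usage sketch (not part of the original module): building a
# query by hand. The cursor is only used when the result is iterated, so None
# is enough to inspect the generated SQL; a real cursor is needed to execute.
def _example_query_sql():
    query = Query(None, 'res_partner')
    query.add_where('"res_partner"."active" = %s', [True])
    query.order = '"res_partner"."name"'
    query.limit = 10
    return query.select('"res_partner"."id"', '"res_partner"."name"')
    # -> ('SELECT "res_partner"."id", "res_partner"."name" FROM "res_partner"
    #      WHERE "res_partner"."active" = %s ORDER BY ... LIMIT 10', [True])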
| 40.45935 | 9,953 |
389 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from ..exceptions import except_orm
from ..models import Model, TransientModel, AbstractModel
# Deprecated, kept for backward compatibility.
except_osv = except_orm
# Deprecated, kept for backward compatibility.
osv = Model
osv_memory = TransientModel
osv_abstract = AbstractModel # ;-)
| 29.923077 | 389 |
49,899 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" Domain expression processing
The main duty of this module is to compile a domain expression into a
SQL query. A lot of things should be documented here, but as a first
step in the right direction, some tests in test_expression.py
might give you some additional information.
For legacy reasons, a domain uses an inconsistent two-level abstract
syntax (domains are regular Python data structures). At the first
level, a domain is an expression made of terms (sometimes called
leaves) and (domain) operators used in prefix notation. The available
operators at this level are '!', '&', and '|'. '!' is a unary 'not',
'&' is a binary 'and', and '|' is a binary 'or'. For instance, here
is a possible domain. (<term> stands for an arbitrary term, more on
this later.)::
['&', '!', <term1>, '|', <term2>, <term3>]
It is equivalent to this pseudo code using infix notation::
(not <term1>) and (<term2> or <term3>)
The second level of syntax deals with the term representation. A term
is a triple of the form (left, operator, right). That is, a term uses
an infix notation, and the available operators, and possible left and
right operands differ with those of the previous level. Here is a
possible term::
('company_id.name', '=', 'OpenERP')
The left and right operands don't have the same possible values. The
left operand is a field name (related to the model for which the domain
applies). Actually, the field name can use the dot-notation to
traverse relationships. The right operand is a Python value whose
type should match the used operator and field type. In the above
example, a string is used because the name field of a company has type
string, and because we use the '=' operator. When appropriate, a 'in'
operator can be used, and thus the right operand should be a list.
Note: the non-uniform syntax could have been more uniform, but this
would hide an important limitation of the domain syntax. Say that the
term representation was ['=', 'company_id.name', 'OpenERP']. Used in a
complete domain, this would look like::
['!', ['=', 'company_id.name', 'OpenERP']]
and you would be tempted to believe something like this would be
possible::
['!', ['=', 'company_id.name', ['&', ..., ...]]]
That is, a domain could be a valid operand. But this is not the
case. A domain is really limited to a two-level nature, and can not
take a recursive form: a domain is not a valid second-level operand.
Unaccent - Accent-insensitive search
OpenERP will use the SQL function 'unaccent' when available for the
'ilike' and 'not ilike' operators, and enabled in the configuration.
Normally the 'unaccent' function is obtained from `the PostgreSQL
'unaccent' contrib module
<http://developer.postgresql.org/pgdocs/postgres/unaccent.html>`_.
.. todo: The following explanation should be moved to some external
installation guide
The steps to install the module might differ on specific PostgreSQL
versions. We give here some instruction for PostgreSQL 9.x on a
Ubuntu system.
Ubuntu doesn't come yet with PostgreSQL 9.x, so an alternative package
source is used. We use Martin Pitt's PPA available at
`ppa:pitti/postgresql
<https://launchpad.net/~pitti/+archive/postgresql>`_.
.. code-block:: sh
> sudo add-apt-repository ppa:pitti/postgresql
> sudo apt-get update
Once the package list is up-to-date, you have to install PostgreSQL
9.0 and its contrib modules.
.. code-block:: sh
> sudo apt-get install postgresql-9.0 postgresql-contrib-9.0
When you want to enable unaccent on some database:
.. code-block:: sh
> psql9 <database> -f /usr/share/postgresql/9.0/contrib/unaccent.sql
Here :program:`psql9` is an alias for the newly installed PostgreSQL
9.0 tool, together with the correct port if necessary (for instance if
PostgreSQL 8.4 is running on 5432). (Other aliases can be used for
createdb and dropdb.)
.. code-block:: sh
> alias psql9='/usr/lib/postgresql/9.0/bin/psql -p 5433'
You can check unaccent is working:
.. code-block:: sh
> psql9 <database> -c"select unaccent('hélène')"
Finally, to instruct OpenERP to really use the unaccent function, you have to
start the server specifying the ``--unaccent`` flag.
"""
import collections.abc
import warnings
import logging
import reprlib
import traceback
from functools import partial
from datetime import date, datetime, time
import odoo.modules
from odoo.osv.query import Query, _generate_table_alias
from odoo.tools import pycompat
from odoo.tools.misc import get_lang
from ..models import MAGIC_COLUMNS, BaseModel
import odoo.tools as tools
# Domain operators.
NOT_OPERATOR = '!'
OR_OPERATOR = '|'
AND_OPERATOR = '&'
DOMAIN_OPERATORS = (NOT_OPERATOR, OR_OPERATOR, AND_OPERATOR)
# List of available term operators. It is also possible to use the '<>'
# operator, which is strictly the same as '!='; the latter should be preferred
# for consistency. This list doesn't contain '<>' as it is simplified to '!='
# by the normalize_operator() function (so later part of the code deals with
# only one representation).
# Internals (i.e. not available to the user) 'inselect' and 'not inselect'
# operators are also used. In this case its right operand has the form (subselect, params).
TERM_OPERATORS = ('=', '!=', '<=', '<', '>', '>=', '=?', '=like', '=ilike',
'like', 'not like', 'ilike', 'not ilike', 'in', 'not in',
'child_of', 'parent_of')
# A subset of the above operators, with a 'negative' semantic. When the
# expressions 'in NEGATIVE_TERM_OPERATORS' or 'not in NEGATIVE_TERM_OPERATORS' are used in the code
# below, this doesn't necessarily mean that any of those NEGATIVE_TERM_OPERATORS is
# legal in the processed term.
NEGATIVE_TERM_OPERATORS = ('!=', 'not like', 'not ilike', 'not in')
# Negation of domain expressions
DOMAIN_OPERATORS_NEGATION = {
AND_OPERATOR: OR_OPERATOR,
OR_OPERATOR: AND_OPERATOR,
}
TERM_OPERATORS_NEGATION = {
'<': '>=',
'>': '<=',
'<=': '>',
'>=': '<',
'=': '!=',
'!=': '=',
'in': 'not in',
'like': 'not like',
'ilike': 'not ilike',
'not in': 'in',
'not like': 'like',
'not ilike': 'ilike',
}
TRUE_LEAF = (1, '=', 1)
FALSE_LEAF = (0, '=', 1)
TRUE_DOMAIN = [TRUE_LEAF]
FALSE_DOMAIN = [FALSE_LEAF]
_logger = logging.getLogger(__name__)
# --------------------------------------------------
# Generic domain manipulation
# --------------------------------------------------
def normalize_domain(domain):
"""Returns a normalized version of ``domain_expr``, where all implicit '&' operators
have been made explicit. One property of normalized domain expressions is that they
can be easily combined together as if they were single domain components.
"""
assert isinstance(domain, (list, tuple)), "Domains to normalize must have a 'domain' form: a list or tuple of domain components"
if not domain:
return [TRUE_LEAF]
result = []
expected = 1 # expected number of expressions
op_arity = {NOT_OPERATOR: 1, AND_OPERATOR: 2, OR_OPERATOR: 2}
for token in domain:
if expected == 0: # more than expected, like in [A, B]
result[0:0] = [AND_OPERATOR] # put an extra '&' in front
expected = 1
if isinstance(token, (list, tuple)): # domain term
expected -= 1
token = tuple(token)
else:
expected += op_arity.get(token, 0) - 1
result.append(token)
assert expected == 0, 'This domain is syntactically not correct: %s' % (domain)
return result
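# Illustrative sketch (not part of the original module): the implicit '&'
# between consecutive terms is made explicit by normalization.
def _example_normalize_domain():
    domain = [('login', '=', 'admin'), ('active', '=', True)]
    return normalize_domain(domain)
    # -> ['&', ('login', '=', 'admin'), ('active', '=', True)]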
def is_false(model, domain):
""" Return whether ``domain`` is logically equivalent to false. """
# use three-valued logic: -1 is false, 0 is unknown, +1 is true
stack = []
for token in reversed(normalize_domain(domain)):
if token == '&':
stack.append(min(stack.pop(), stack.pop()))
elif token == '|':
stack.append(max(stack.pop(), stack.pop()))
elif token == '!':
stack.append(-stack.pop())
elif token == TRUE_LEAF:
stack.append(+1)
elif token == FALSE_LEAF:
stack.append(-1)
elif token[1] == 'in' and not (isinstance(token[2], Query) or token[2]):
stack.append(-1)
elif token[1] == 'not in' and not (isinstance(token[2], Query) or token[2]):
stack.append(+1)
else:
stack.append(0)
return stack.pop() == -1
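# Illustrative sketch (not part of the original module): an 'in' term with an
# empty list can never match, so the whole conjunction is detected as false.
# `model` is assumed to be any model recordset (e.g. env['res.partner']); the
# domain alone is enough to decide in this case.
def _example_is_false(model):
    return is_false(model, [('id', 'in', []), ('active', '=', True)])  # -> True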
def combine(operator, unit, zero, domains):
"""Returns a new domain expression where all domain components from ``domains``
have been added together using the binary operator ``operator``.
It is guaranteed to return a normalized domain.
:param unit: the identity element of the domains "set" with regard to the operation
performed by ``operator``, i.e the domain component ``i`` which, when
combined with any domain ``x`` via ``operator``, yields ``x``.
E.g. [(1,'=',1)] is the typical unit for AND_OPERATOR: adding it
to any domain component gives the same domain.
:param zero: the absorbing element of the domains "set" with regard to the operation
performed by ``operator``, i.e the domain component ``z`` which, when
combined with any domain ``x`` via ``operator``, yields ``z``.
E.g. [(1,'=',1)] is the typical zero for OR_OPERATOR: as soon as
you see it in a domain component the resulting domain is the zero.
:param domains: a list of normalized domains.
"""
result = []
count = 0
if domains == [unit]:
return unit
for domain in domains:
if domain == unit:
continue
if domain == zero:
return zero
if domain:
result += normalize_domain(domain)
count += 1
result = [operator] * (count - 1) + result
return result or unit
def AND(domains):
"""AND([D1,D2,...]) returns a domain representing D1 and D2 and ... """
return combine(AND_OPERATOR, [TRUE_LEAF], [FALSE_LEAF], domains)
def OR(domains):
"""OR([D1,D2,...]) returns a domain representing D1 or D2 or ... """
return combine(OR_OPERATOR, [FALSE_LEAF], [TRUE_LEAF], domains)
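# Illustrative sketch (not part of the original module): combining already
# valid domains; the result is normalized, with operators in prefix notation.
# The field names below are placeholders.
def _example_combine_domains():
    managers = [('is_manager', '=', True)]
    admins = [('login', '=', 'admin')]
    active = [('active', '=', True)]
    return AND([OR([managers, admins]), active])
    # -> ['&', '|', ('is_manager', '=', True), ('login', '=', 'admin'),
    #     ('active', '=', True)]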
def distribute_not(domain):
""" Distribute any '!' domain operators found inside a normalized domain.
Because we don't use SQL semantics for processing a 'left not in right'
query (i.e. our 'not in' is not simply translated to a SQL 'not in'),
a '! left in right' cannot simply be processed
by __leaf_to_sql by first emitting code for 'left in right' and then wrapping
the result with 'not (...)', as that would result in a 'not in' at the SQL
level.
This function is thus responsible for pushing any '!' domain operators
inside the terms themselves. For example::
['!','&',('user_id','=',4),('partner_id','in',[1,2])]
will be turned into:
['|',('user_id','!=',4),('partner_id','not in',[1,2])]
"""
# This is an iterative version of a recursive function that splits the domain
# into subdomains, processes them and combines the results. The "stack" below
# represents the recursive calls to be done.
result = []
stack = [False]
for token in domain:
negate = stack.pop()
# negate tells whether the subdomain starting with token must be negated
if is_leaf(token):
if negate:
left, operator, right = token
if operator in TERM_OPERATORS_NEGATION:
if token in (TRUE_LEAF, FALSE_LEAF):
result.append(FALSE_LEAF if token == TRUE_LEAF else TRUE_LEAF)
else:
result.append((left, TERM_OPERATORS_NEGATION[operator], right))
else:
result.append(NOT_OPERATOR)
result.append(token)
else:
result.append(token)
elif token == NOT_OPERATOR:
stack.append(not negate)
elif token in DOMAIN_OPERATORS_NEGATION:
result.append(DOMAIN_OPERATORS_NEGATION[token] if negate else token)
stack.append(negate)
stack.append(negate)
else:
result.append(token)
return result
# --------------------------------------------------
# Generic leaf manipulation
# --------------------------------------------------
def _quote(to_quote):
if '"' not in to_quote:
return '"%s"' % to_quote
return to_quote
def normalize_leaf(element):
""" Change a term's operator to some canonical form, simplifying later
processing. """
if not is_leaf(element):
return element
left, operator, right = element
original = operator
operator = operator.lower()
if operator == '<>':
operator = '!='
if isinstance(right, bool) and operator in ('in', 'not in'):
_logger.warning("The domain term '%s' should use the '=' or '!=' operator." % ((left, original, right),))
operator = '=' if operator == 'in' else '!='
if isinstance(right, (list, tuple)) and operator in ('=', '!='):
_logger.warning("The domain term '%s' should use the 'in' or 'not in' operator." % ((left, original, right),))
operator = 'in' if operator == '=' else 'not in'
return left, operator, right
def is_operator(element):
""" Test whether an object is a valid domain operator. """
return isinstance(element, str) and element in DOMAIN_OPERATORS
def is_leaf(element, internal=False):
""" Test whether an object is a valid domain term:
- is a list or tuple
- with 3 elements
- its second element is a valid operator
:param tuple element: a leaf in form (left, operator, right)
:param boolean internal: allow or not the 'inselect' internal operator
in the term. This should be always left to False.
Note: OLD TODO change the share wizard to use this function.
"""
INTERNAL_OPS = TERM_OPERATORS + ('<>',)
if internal:
INTERNAL_OPS += ('inselect', 'not inselect')
return (isinstance(element, tuple) or isinstance(element, list)) \
and len(element) == 3 \
and element[1] in INTERNAL_OPS \
and ((isinstance(element[0], str) and element[0])
or tuple(element) in (TRUE_LEAF, FALSE_LEAF))
def is_boolean(element):
return element == TRUE_LEAF or element == FALSE_LEAF
def check_leaf(element, internal=False):
if not is_operator(element) and not is_leaf(element, internal):
raise ValueError("Invalid leaf %s" % str(element))
# --------------------------------------------------
# SQL utils
# --------------------------------------------------
def get_unaccent_wrapper(cr):
if odoo.registry(cr.dbname).has_unaccent:
return lambda x: "unaccent(%s)" % (x,)
return lambda x: x
class expression(object):
""" Parse a domain expression
Use a real polish notation
Leafs are still in a ('foo', '=', 'bar') format
For more info: http://christophe-simonis-at-tiny.blogspot.com/2008/08/new-new-domain-notation.html
"""
def __init__(self, domain, model, alias=None, query=None):
""" Initialize expression object and automatically parse the expression
right after initialization.
:param domain: expression (using domain ('foo', '=', 'bar') format)
:param model: root model
:param alias: alias for the model table if query is provided
:param query: optional query object holding the final result
:attr root_model: base model for the query
:attr expression: the domain to parse, normalized and prepared
:attr result: the result of the parsing, as a pair (query, params)
:attr query: Query object holding the final result
"""
self._unaccent = get_unaccent_wrapper(model._cr)
self.root_model = model
self.root_alias = alias or model._table
# normalize and prepare the expression for parsing
self.expression = distribute_not(normalize_domain(domain))
# this object handles all the joins
self.query = Query(model.env.cr, model._table, model._table_query) if query is None else query
# parse the domain expression
self.parse()
# ----------------------------------------
# Leafs management
# ----------------------------------------
def get_tables(self):
warnings.warn("deprecated expression.get_tables(), use expression.query instead",
DeprecationWarning)
return self.query.tables
# ----------------------------------------
# Parsing
# ----------------------------------------
def parse(self):
""" Transform the leaves of the expression
The principle is to pop elements from a leaf stack one at a time.
Each leaf is processed. The processing is an if/elif list of various
cases that appear in the leafs (many2one, function fields, ...).
Three things can happen as a processing result:
- the leaf is a logic operator, and updates the result stack
accordingly;
- the leaf has been modified and/or new leafs have to be introduced
in the expression; they are pushed into the leaf stack, to be
processed right after;
- the leaf is converted to SQL and added to the result stack
Here is a suggested execution:
step stack result_stack
['&', A, B] []
substitute B ['&', A, B1] []
convert B1 in SQL ['&', A] ["B1"]
substitute A ['&', '|', A1, A2] ["B1"]
convert A2 in SQL ['&', '|', A1] ["B1", "A2"]
convert A1 in SQL ['&', '|'] ["B1", "A2", "A1"]
apply operator OR ['&'] ["B1", "A1 or A2"]
apply operator AND [] ["(A1 or A2) and B1"]
Some internal var explanation:
:var list path: left operand seen as a sequence of field names
("foo.bar" -> ["foo", "bar"])
:var obj model: model object, model containing the field
(the name provided in the left operand)
:var obj field: the field corresponding to `path[0]`
:var obj column: the column corresponding to `path[0]`
:var obj comodel: relational model of field (field.comodel)
(res_partner.bank_ids -> res.partner.bank)
"""
cr, uid, context, su = self.root_model.env.args
def to_ids(value, comodel, leaf):
""" Normalize a single id or name, or a list of those, into a list of ids
:param {int,long,basestring,list,tuple} value:
if int, long -> return [value]
if basestring, convert it into a list of basestrings, then
if list of basestring ->
perform a name_search on comodel for each name
return the list of related ids
"""
names = []
if isinstance(value, str):
names = [value]
elif value and isinstance(value, (tuple, list)) and all(isinstance(item, str) for item in value):
names = value
elif isinstance(value, int):
if not value:
# given this nonsensical domain, it is generally cheaper to
# interpret False as [], so that "X child_of False" will
# match nothing
_logger.warning("Unexpected domain [%s], interpreted as False", leaf)
return []
return [value]
if names:
return list({
rid
for name in names
for rid in comodel._name_search(name, [], 'ilike', limit=None)
})
return list(value)
def child_of_domain(left, ids, left_model, parent=None, prefix=''):
""" Return a domain implementing the child_of operator for [(left,child_of,ids)],
either as a range using the parent_path tree lookup field
(when available), or as an expanded [(left,in,child_ids)] """
if not ids:
return [FALSE_LEAF]
if left_model._parent_store:
domain = OR([
[('parent_path', '=like', rec.parent_path + '%')]
for rec in left_model.sudo().browse(ids)
])
else:
# recursively retrieve all children nodes with sudo(); the
# filtering of forbidden records is done by the rest of the
# domain
parent_name = parent or left_model._parent_name
if (left_model._name != left_model._fields[parent_name].comodel_name):
raise ValueError(f"Invalid parent field: {left_model._fields[parent_name]}")
child_ids = set()
records = left_model.sudo().browse(ids)
while records:
child_ids.update(records._ids)
records = records.search([(parent_name, 'in', records.ids)], order='id') - records.browse(child_ids)
domain = [('id', 'in', list(child_ids))]
if prefix:
return [(left, 'in', left_model._search(domain, order='id'))]
return domain
def parent_of_domain(left, ids, left_model, parent=None, prefix=''):
""" Return a domain implementing the parent_of operator for [(left,parent_of,ids)],
either as a range using the parent_path tree lookup field
(when available), or as an expanded [(left,in,parent_ids)] """
if not ids:
return [FALSE_LEAF]
if left_model._parent_store:
parent_ids = [
int(label)
for rec in left_model.sudo().browse(ids)
for label in rec.parent_path.split('/')[:-1]
]
domain = [('id', 'in', parent_ids)]
else:
# recursively retrieve all parent nodes with sudo() to avoid
# access rights errors; the filtering of forbidden records is
# done by the rest of the domain
parent_name = parent or left_model._parent_name
parent_ids = set()
records = left_model.sudo().browse(ids)
while records:
parent_ids.update(records._ids)
records = records[parent_name] - records.browse(parent_ids)
domain = [('id', 'in', list(parent_ids))]
if prefix:
return [(left, 'in', left_model._search(domain, order='id'))]
return domain
HIERARCHY_FUNCS = {'child_of': child_of_domain,
'parent_of': parent_of_domain}
def pop():
""" Pop a leaf to process. """
return stack.pop()
def push(leaf, model, alias, internal=False):
""" Push a leaf to be processed right after. """
leaf = normalize_leaf(leaf)
check_leaf(leaf, internal)
stack.append((leaf, model, alias))
def pop_result():
return result_stack.pop()
def push_result(query, params):
result_stack.append((query, params))
# process domain from right to left; stack contains domain leaves, in
# the form: (leaf, corresponding model, corresponding table alias)
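# For instance (hypothetical fields a and b), the normalized domain
# ['&', ('a', '=', 1), ('b', '=', 2)] is pushed leaf by leaf; both term
# leaves are popped and converted to SQL onto result_stack, then the '&'
# operator pops those two expressions and combines them into '(... AND ...)'.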
stack = []
for leaf in self.expression:
push(leaf, self.root_model, self.root_alias)
# stack of SQL expressions in the form: (expr, params)
result_stack = []
while stack:
# Get the next leaf to process
leaf, model, alias = pop()
# ----------------------------------------
# SIMPLE CASE
# 1. leaf is an operator
# 2. leaf is a true/false leaf
# -> convert and add directly to result
# ----------------------------------------
if is_operator(leaf):
if leaf == NOT_OPERATOR:
expr, params = pop_result()
push_result('(NOT (%s))' % expr, params)
else:
ops = {AND_OPERATOR: '(%s AND %s)', OR_OPERATOR: '(%s OR %s)'}
lhs, lhs_params = pop_result()
rhs, rhs_params = pop_result()
push_result(ops[leaf] % (lhs, rhs), lhs_params + rhs_params)
continue
if is_boolean(leaf):
expr, params = self.__leaf_to_sql(leaf, model, alias)
push_result(expr, params)
continue
# Get working variables
left, operator, right = leaf
path = left.split('.', 1)
field = model._fields.get(path[0])
comodel = model.env.get(getattr(field, 'comodel_name', None))
# ----------------------------------------
# FIELD NOT FOUND
# -> from inherits'd fields -> work on the related model, and add
# a join condition
# -> ('id', 'child_of', '..') -> use a 'to_ids'
# -> if it is one of the _log_access special fields, add it directly to
# the result
# TODO: make these fields explicitly available in self.columns instead!
# -> else: crash
# ----------------------------------------
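# e.g. searching res.users on a field inherited from res.partner (such as
# 'name') joins res_partner through the users' partner_id column and
# re-pushes the same leaf against res.partner.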
if not field:
raise ValueError("Invalid field %s.%s in leaf %s" % (model._name, path[0], str(leaf)))
elif field.inherited:
parent_model = model.env[field.related_field.model_name]
parent_fname = model._inherits[parent_model._name]
parent_alias = self.query.left_join(
alias, parent_fname, parent_model._table, 'id', parent_fname,
)
push(leaf, parent_model, parent_alias)
elif left == 'id' and operator in HIERARCHY_FUNCS:
ids2 = to_ids(right, model, leaf)
dom = HIERARCHY_FUNCS[operator](left, ids2, model)
for dom_leaf in dom:
push(dom_leaf, model, alias)
# ----------------------------------------
# PATH SPOTTED
# -> many2one or one2many with _auto_join:
# - add a join, then jump into linked column: column.remaining on
# src_table is replaced by remaining on dst_table, and set for re-evaluation
# - if a domain is defined on the column, add it into evaluation
# on the relational table
# -> many2one, many2many, one2many: replace by an equivalent computed
# domain, given by recursively searching on the remaining of the path
# -> note: hack about columns.property should not be necessary anymore
# as after transforming the column, it will go through this loop once again
# ----------------------------------------
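# Illustration (hypothetical field): with auto_join set on a many2one field
# partner_id, the leaf ('partner_id.name', 'ilike', 'foo') adds a LEFT JOIN
# on the comodel table and re-pushes ('name', 'ilike', 'foo') against that
# alias; without auto_join the same leaf is rewritten as
# ('partner_id', 'in', comodel._search([('name', 'ilike', 'foo')])).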
elif len(path) > 1 and field.store and field.type == 'many2one' and field.auto_join:
# res_partner.state_id = res_partner__state_id.id
coalias = self.query.left_join(
alias, path[0], comodel._table, 'id', path[0],
)
push((path[1], operator, right), comodel, coalias)
elif len(path) > 1 and field.store and field.type == 'one2many' and field.auto_join:
# use a subquery bypassing access rules and business logic
domain = [(path[1], operator, right)] + field.get_domain_list(model)
query = comodel.with_context(**field.context)._where_calc(domain)
subquery, subparams = query.select('"%s"."%s"' % (comodel._table, field.inverse_name))
push(('id', 'inselect', (subquery, subparams)), model, alias, internal=True)
elif len(path) > 1 and field.store and field.auto_join:
raise NotImplementedError('auto_join attribute not supported on field %s' % field)
elif len(path) > 1 and field.store and field.type == 'many2one':
right_ids = comodel.with_context(active_test=False)._search([(path[1], operator, right)], order='id')
push((path[0], 'in', right_ids), model, alias)
# Simplify the search when the left operand traverses a one2many or many2many field
elif len(path) > 1 and field.store and field.type in ('many2many', 'one2many'):
right_ids = comodel.with_context(**field.context)._search([(path[1], operator, right)], order='id')
push((path[0], 'in', right_ids), model, alias)
elif not field.store:
# A non-stored field should provide an implementation of search.
if not field.search:
# field does not support search!
_logger.error("Non-stored field %s cannot be searched.", field, exc_info=True)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(''.join(traceback.format_stack()))
# Ignore it: generate a dummy leaf.
domain = []
else:
# Let the field generate a domain.
if len(path) > 1:
right = comodel._search([(path[1], operator, right)], order='id')
operator = 'in'
domain = field.determine_domain(model, operator, right)
model._flush_search(domain, order='id')
for elem in normalize_domain(domain):
push(elem, model, alias, internal=True)
# -------------------------------------------------
# RELATIONAL FIELDS
# -------------------------------------------------
# Applying recursivity on field(one2many)
elif field.type == 'one2many' and operator in HIERARCHY_FUNCS:
ids2 = to_ids(right, comodel, leaf)
if field.comodel_name != model._name:
dom = HIERARCHY_FUNCS[operator](left, ids2, comodel, prefix=field.comodel_name)
else:
dom = HIERARCHY_FUNCS[operator]('id', ids2, model, parent=left)
for dom_leaf in dom:
push(dom_leaf, model, alias)
elif field.type == 'one2many':
domain = field.get_domain_list(model)
inverse_field = comodel._fields[field.inverse_name]
inverse_is_int = inverse_field.type in ('integer', 'many2one_reference')
unwrap_inverse = (lambda ids: ids) if inverse_is_int else (lambda recs: recs.ids)
if right is not False:
# determine ids2 in comodel
if isinstance(right, str):
op2 = (TERM_OPERATORS_NEGATION[operator]
if operator in NEGATIVE_TERM_OPERATORS else operator)
ids2 = comodel._name_search(right, domain or [], op2, limit=None)
elif isinstance(right, collections.abc.Iterable):
ids2 = right
else:
ids2 = [right]
if inverse_is_int and domain:
ids2 = comodel._search([('id', 'in', ids2)] + domain, order='id')
if inverse_field.store:
# In the condition, the subquery must not return NULL
# values, since NULL makes the IN test evaluate to NULL
# instead of FALSE. This may discard expected results: for
# instance "id NOT IN (42, NULL)" is never TRUE.
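# Illustration (hypothetical models my.order / my.order.line with a
# non-required inverse field order_id): ('line_ids', 'not in', ids) becomes
# "my_order"."id" NOT IN (SELECT "order_id" FROM "my_order_line"
# WHERE "id" IN %s AND "order_id" IS NOT NULL)
# where the IS NOT NULL filter keeps a stray NULL inverse value from turning
# the whole NOT IN test into NULL.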
in_ = 'NOT IN' if operator in NEGATIVE_TERM_OPERATORS else 'IN'
if isinstance(ids2, Query):
if not inverse_field.required:
ids2.add_where(f'"{comodel._table}"."{inverse_field.name}" IS NOT NULL')
subquery, subparams = ids2.subselect(f'"{comodel._table}"."{inverse_field.name}"')
else:
subquery = f'SELECT "{inverse_field.name}" FROM "{comodel._table}" WHERE "id" IN %s'
if not inverse_field.required:
subquery += f' AND "{inverse_field.name}" IS NOT NULL'
subparams = [tuple(ids2) or (None,)]
push_result(f'("{alias}"."id" {in_} ({subquery}))', subparams)
else:
# determine ids1 in model related to ids2
recs = comodel.browse(ids2).sudo().with_context(prefetch_fields=False)
ids1 = unwrap_inverse(recs.mapped(inverse_field.name))
# rewrite condition in terms of ids1
op1 = 'not in' if operator in NEGATIVE_TERM_OPERATORS else 'in'
push(('id', op1, ids1), model, alias)
else:
if inverse_field.store and not (inverse_is_int and domain):
# rewrite condition to match records with/without lines
op1 = 'inselect' if operator in NEGATIVE_TERM_OPERATORS else 'not inselect'
subquery = f'SELECT "{inverse_field.name}" FROM "{comodel._table}" WHERE "{inverse_field.name}" IS NOT NULL'
push(('id', op1, (subquery, [])), model, alias, internal=True)
else:
comodel_domain = [(inverse_field.name, '!=', False)]
if inverse_is_int and domain:
comodel_domain += domain
recs = comodel.search(comodel_domain, order='id').sudo().with_context(prefetch_fields=False)
# determine ids1 = records with lines
ids1 = unwrap_inverse(recs.mapped(inverse_field.name))
# rewrite condition to match records with/without lines
op1 = 'in' if operator in NEGATIVE_TERM_OPERATORS else 'not in'
push(('id', op1, ids1), model, alias)
elif field.type == 'many2many':
rel_table, rel_id1, rel_id2 = field.relation, field.column1, field.column2
if operator in HIERARCHY_FUNCS:
# determine ids2 in comodel
ids2 = to_ids(right, comodel, leaf)
domain = HIERARCHY_FUNCS[operator]('id', ids2, comodel)
ids2 = comodel._search(domain, order='id')
# rewrite condition in terms of ids2
if comodel == model:
push(('id', 'in', ids2), model, alias)
else:
rel_alias = _generate_table_alias(alias, field.name)
push_result(f"""
EXISTS (
SELECT 1 FROM "{rel_table}" AS "{rel_alias}"
WHERE "{rel_alias}"."{rel_id1}" = "{alias}".id
AND "{rel_alias}"."{rel_id2}" IN %s
)
""", [tuple(ids2) or (None,)])
elif right is not False:
# determine ids2 in comodel
if isinstance(right, str):
domain = field.get_domain_list(model)
op2 = (TERM_OPERATORS_NEGATION[operator]
if operator in NEGATIVE_TERM_OPERATORS else operator)
ids2 = comodel._name_search(right, domain or [], op2, limit=None)
elif isinstance(right, collections.abc.Iterable):
ids2 = right
else:
ids2 = [right]
if isinstance(ids2, Query):
# rewrite condition in terms of ids2
subquery, params = ids2.subselect()
term_id2 = f"({subquery})"
else:
# rewrite condition in terms of ids2
term_id2 = "%s"
params = [tuple(it for it in ids2 if it) or (None,)]
exists = 'NOT EXISTS' if operator in NEGATIVE_TERM_OPERATORS else 'EXISTS'
rel_alias = _generate_table_alias(alias, field.name)
push_result(f"""
{exists} (
SELECT 1 FROM "{rel_table}" AS "{rel_alias}"
WHERE "{rel_alias}"."{rel_id1}" = "{alias}".id
AND "{rel_alias}"."{rel_id2}" IN {term_id2}
)
""", params)
else:
# rewrite condition to match records with/without relations
exists = 'EXISTS' if operator in NEGATIVE_TERM_OPERATORS else 'NOT EXISTS'
rel_alias = _generate_table_alias(alias, field.name)
push_result(f"""
{exists} (
SELECT 1 FROM "{rel_table}" AS "{rel_alias}"
WHERE "{rel_alias}"."{rel_id1}" = "{alias}".id
)
""", [])
elif field.type == 'many2one':
if operator in HIERARCHY_FUNCS:
ids2 = to_ids(right, comodel, leaf)
if field.comodel_name != model._name:
dom = HIERARCHY_FUNCS[operator](left, ids2, comodel, prefix=field.comodel_name)
else:
dom = HIERARCHY_FUNCS[operator]('id', ids2, model, parent=left)
for dom_leaf in dom:
push(dom_leaf, model, alias)
else:
def _get_expression(comodel, left, right, operator):
# Special treatment for ill-formed domains
operator = 'in' if operator in ('<', '>', '<=', '>=') else operator
dict_op = {'not in': '!=', 'in': '=', '=': 'in', '!=': 'not in'}
if isinstance(right, tuple):
right = list(right)
if (not isinstance(right, list)) and operator in ['not in', 'in']:
operator = dict_op[operator]
elif isinstance(right, list) and operator in ['!=', '=']: # for domain (FIELD,'=',['value1','value2'])
operator = dict_op[operator]
res_ids = comodel.with_context(active_test=False)._name_search(right, [], operator, limit=None)
if operator in NEGATIVE_TERM_OPERATORS:
res_ids = list(res_ids) + [False] # TODO this should not be appended if False was in 'right'
return left, 'in', res_ids
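# e.g. the ill-formed leaf ('partner_id', 'in', 'Azure') (hypothetical
# value) is turned into ('partner_id', '=', 'Azure'), resolved through
# _name_search, and finally rewritten as ('partner_id', 'in', [matching ids]).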
# resolve string-based m2o criterion into IDs
if isinstance(right, str) or \
isinstance(right, (tuple, list)) and right and all(isinstance(item, str) for item in right):
push(_get_expression(comodel, left, right, operator), model, alias)
else:
# right == [] or right == False and all other cases are handled by __leaf_to_sql()
expr, params = self.__leaf_to_sql(leaf, model, alias)
push_result(expr, params)
# -------------------------------------------------
# BINARY FIELDS STORED IN ATTACHMENT
# -> check for null only
# -------------------------------------------------
elif field.type == 'binary' and field.attachment:
if operator in ('=', '!=') and not right:
inselect_operator = 'inselect' if operator in NEGATIVE_TERM_OPERATORS else 'not inselect'
subselect = "SELECT res_id FROM ir_attachment WHERE res_model=%s AND res_field=%s"
params = (model._name, left)
push(('id', inselect_operator, (subselect, params)), model, alias, internal=True)
else:
_logger.error("Binary field '%s' stored in attachment: ignore %s %s %s",
field.string, left, operator, reprlib.repr(right))
push(TRUE_LEAF, model, alias)
# -------------------------------------------------
# OTHER FIELDS
# -> datetime fields: manage time part of the datetime
# column when it is not there
# -> manage translatable fields
# -------------------------------------------------
else:
if field.type == 'datetime' and right:
if isinstance(right, str) and len(right) == 10:
if operator in ('>', '<='):
right += ' 23:59:59'
else:
right += ' 00:00:00'
push((left, operator, right), model, alias)
elif isinstance(right, date) and not isinstance(right, datetime):
if operator in ('>', '<='):
right = datetime.combine(right, time.max)
else:
right = datetime.combine(right, time.min)
push((left, operator, right), model, alias)
else:
expr, params = self.__leaf_to_sql(leaf, model, alias)
push_result(expr, params)
elif field.translate is True and right:
need_wildcard = operator in ('like', 'ilike', 'not like', 'not ilike')
sql_operator = {'=like': 'like', '=ilike': 'ilike'}.get(operator, operator)
if need_wildcard:
right = '%%%s%%' % right
if sql_operator in ('in', 'not in'):
right = tuple(right)
unaccent = self._unaccent if sql_operator.endswith('like') else lambda x: x
left = unaccent(model._generate_translated_field(alias, left, self.query))
instr = unaccent('%s')
push_result(f"{left} {sql_operator} {instr}", [right])
else:
expr, params = self.__leaf_to_sql(leaf, model, alias)
push_result(expr, params)
# ----------------------------------------
# END OF PARSING FULL DOMAIN
# -> put result in self.result and self.query
# ----------------------------------------
[self.result] = result_stack
where_clause, where_params = self.result
self.query.add_where(where_clause, where_params)
def __leaf_to_sql(self, leaf, model, alias):
left, operator, right = leaf
# final sanity checks - should never fail
assert operator in (TERM_OPERATORS + ('inselect', 'not inselect')), \
"Invalid operator %r in domain term %r" % (operator, leaf)
assert leaf in (TRUE_LEAF, FALSE_LEAF) or left in model._fields, \
"Invalid field %r in domain term %r" % (left, leaf)
assert not isinstance(right, BaseModel), \
"Invalid value %r in domain term %r" % (right, leaf)
table_alias = '"%s"' % alias
if leaf == TRUE_LEAF:
query = 'TRUE'
params = []
elif leaf == FALSE_LEAF:
query = 'FALSE'
params = []
elif operator == 'inselect':
query = '(%s."%s" in (%s))' % (table_alias, left, right[0])
params = list(right[1])
elif operator == 'not inselect':
query = '(%s."%s" not in (%s))' % (table_alias, left, right[0])
params = list(right[1])
elif operator in ['in', 'not in']:
# Two cases: right is a boolean or a list. The boolean case is an
# abuse and handled for backward compatibility.
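# e.g. ('active', 'in', True) is tolerated and rendered as
# "active IS NOT NULL", while ('active', 'not in', True) yields
# "active IS NULL"; well-formed domains should use '=' / '!=' instead.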
if isinstance(right, bool):
_logger.warning("The domain term '%s' should use the '=' or '!=' operator." % (leaf,))
if (operator == 'in' and right) or (operator == 'not in' and not right):
query = '(%s."%s" IS NOT NULL)' % (table_alias, left)
else:
query = '(%s."%s" IS NULL)' % (table_alias, left)
params = []
elif isinstance(right, Query):
subquery, subparams = right.subselect()
query = '(%s."%s" %s (%s))' % (table_alias, left, operator, subquery)
params = subparams
elif isinstance(right, (list, tuple)):
if model._fields[left].type == "boolean":
params = [it for it in (True, False) if it in right]
check_null = False in right
else:
params = [it for it in right if it != False]
check_null = len(params) < len(right)
if params:
if left == 'id':
instr = ','.join(['%s'] * len(params))
else:
field = model._fields[left]
instr = ','.join([field.column_format] * len(params))
params = [field.convert_to_column(p, model, validate=False) for p in params]
query = '(%s."%s" %s (%s))' % (table_alias, left, operator, instr)
else:
# The case for (left, 'in', []) or (left, 'not in', []).
query = 'FALSE' if operator == 'in' else 'TRUE'
if (operator == 'in' and check_null) or (operator == 'not in' and not check_null):
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
elif operator == 'not in' and check_null:
query = '(%s AND %s."%s" IS NOT NULL)' % (query, table_alias, left) # needed only for TRUE.
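# e.g. ('user_id', 'in', [1, False]) is rendered, after parameter
# substitution, as ("alias"."user_id" in (1) OR "alias"."user_id" IS NULL),
# so records with no user set are matched as well (field name hypothetical).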
else: # Must not happen
raise ValueError("Invalid domain term %r" % (leaf,))
elif left in model and model._fields[left].type == "boolean" and ((operator == '=' and right is False) or (operator == '!=' and right is True)):
query = '(%s."%s" IS NULL or %s."%s" = false )' % (table_alias, left, table_alias, left)
params = []
elif (right is False or right is None) and (operator == '='):
query = '%s."%s" IS NULL ' % (table_alias, left)
params = []
elif left in model and model._fields[left].type == "boolean" and ((operator == '!=' and right is False) or (operator == '=' and right is True)):
query = '(%s."%s" IS NOT NULL and %s."%s" != false)' % (table_alias, left, table_alias, left)
params = []
elif (right is False or right is None) and (operator == '!='):
query = '%s."%s" IS NOT NULL' % (table_alias, left)
params = []
elif operator == '=?':
if right is False or right is None:
# '=?' is a short-circuit that makes the term TRUE if right is None or False
query = 'TRUE'
params = []
else:
# '=?' behaves like '=' in other cases
query, params = self.__leaf_to_sql((left, '=', right), model, alias)
else:
need_wildcard = operator in ('like', 'ilike', 'not like', 'not ilike')
sql_operator = {'=like': 'like', '=ilike': 'ilike'}.get(operator, operator)
cast = '::text' if sql_operator.endswith('like') else ''
if left not in model:
raise ValueError("Invalid field %r in domain term %r" % (left, leaf))
format = '%s' if need_wildcard else model._fields[left].column_format
unaccent = self._unaccent if sql_operator.endswith('like') else lambda x: x
column = '%s.%s' % (table_alias, _quote(left))
query = '(%s %s %s)' % (unaccent(column + cast), sql_operator, unaccent(format))
if (need_wildcard and not right) or (right and operator in NEGATIVE_TERM_OPERATORS):
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
if need_wildcard:
params = ['%%%s%%' % pycompat.to_text(right)]
else:
field = model._fields[left]
params = [field.convert_to_column(right, model, validate=False)]
return query, params
def to_sql(self):
warnings.warn("deprecated expression.to_sql(), use expression.query instead",
DeprecationWarning)
return self.result
| 45.777064 | 49,897 |
167 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
from odoo import models
class A(models.Model):
_name = _description = 'tab.a'
class B(models.Model):
_name = _description = 'tab.b'
| 18.555556 | 167 |
174 |
py
|
PYTHON
|
15.0
|
{
'name': "Test Action Bindings",
'category': 'Hidden/Tests',
'data': [
'ir.model.access.csv',
'test_data.xml',
],
'license': 'LGPL-3',
}
| 19.333333 | 174 |
3,606 |
py
|
PYTHON
|
15.0
|
from odoo.tests import common
class TestActionBindings(common.TransactionCase):
def test_bindings(self):
""" check the action bindings on models """
Actions = self.env['ir.actions.actions']
# first make sure there is no bound action
self.env.ref('base.action_partner_merge').unlink()
bindings = Actions.get_bindings('res.partner')
self.assertFalse(bindings['action'])
self.assertFalse(bindings['report'])
# create action bindings, and check the returned bindings
action1 = self.env.ref('base.action_attachment')
action2 = self.env.ref('base.ir_default_menu_action')
action3 = self.env['ir.actions.report'].search([('groups_id', '=', False)], limit=1)
action1.binding_model_id = action2.binding_model_id \
= action3.binding_model_id \
= self.env['ir.model']._get('res.partner')
bindings = Actions.get_bindings('res.partner')
self.assertItemsEqual(
bindings['action'],
(action1 + action2).read(['name', 'binding_view_types']),
"Wrong action bindings",
)
self.assertItemsEqual(
bindings['report'],
action3.read(['name', 'binding_view_types']),
"Wrong action bindings",
)
# add a group on an action, and check that it is not returned
group = self.env.ref('base.group_user')
action2.groups_id += group
self.env.user.groups_id -= group
bindings = Actions.get_bindings('res.partner')
self.assertItemsEqual(
bindings['action'],
action1.read(['name', 'binding_view_types']),
"Wrong action bindings",
)
self.assertItemsEqual(
bindings['report'],
action3.read(['name', 'binding_view_types']),
"Wrong action bindings",
)
class TestBindingViewFilters(common.TransactionCase):
def test_act_window(self):
A = self.env['tab.a']
form_act = A.fields_view_get(toolbar=True)['toolbar']['action']
self.assertEqual(
[a['name'] for a in form_act],
['Action 1', 'Action 2', 'Action 3'],
"forms should have all actions")
list_act = A.fields_view_get(view_type='tree', toolbar=True)['toolbar']['action']
self.assertEqual(
[a['name'] for a in list_act],
['Action 1', 'Action 3'],
"lists should not have the form-only action")
kanban_act = A.fields_view_get(view_type='kanban', toolbar=True)['toolbar']['action']
self.assertEqual(
[a['name'] for a in kanban_act],
['Action 1'],
"kanban should only have the universal action")
def test_act_record(self):
B = self.env['tab.b']
form_act = B.fields_view_get(toolbar=True)['toolbar']['action']
self.assertEqual(
[a['name'] for a in form_act],
['Record 1', 'Record 2', 'Record 3'],
"forms should have all actions")
list_act = B.fields_view_get(view_type='tree', toolbar=True)['toolbar']['action']
self.assertEqual(
[a['name'] for a in list_act],
['Record 1', 'Record 3'],
"lists should not have the form-only action")
kanban_act = B.fields_view_get(view_type='kanban', toolbar=True)['toolbar']['action']
self.assertEqual(
[a['name'] for a in kanban_act],
['Record 1'],
"kanban should only have the universal action")
| 38.774194 | 3,606 |
624 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields
class test_uninstall_model(models.Model):
"""
This model uses different types of columns to make it possible to test
the uninstall feature of Odoo.
"""
_name = 'test_uninstall.model'
_description = 'Testing Uninstall Model'
name = fields.Char('Name')
ref = fields.Many2one('res.users', string='User')
rel = fields.Many2many('res.users', string='Users')
_sql_constraints = [
('name_uniq', 'unique (name)', 'Each name must be unique.'),
]
| 31.2 | 624 |
392 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'test-uninstall',
'version': '0.1',
'category': 'Hidden/Tests',
'description': """A module to test the uninstall feature.""",
'depends': ['base'],
'data': ['ir.model.access.csv'],
'installable': True,
'auto_install': False,
'license': 'LGPL-3',
}
| 30.153846 | 392 |
700 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Test Main Flow',
'version': '1.0',
'category': 'Hidden/Tests',
'description': """
This module will test the main workflow of Odoo.
It will install some main apps and will try to execute the most important actions.
""",
'depends': ['web_tour', 'crm', 'sale_timesheet', 'purchase_stock', 'mrp', 'account'],
'installable': True,
'post_init_hook': '_auto_install_enterprise_dependencies',
'assets': {
'web.assets_tests': [
# inside .
'test_main_flows/static/tests/tours/main_flow.js',
],
},
'license': 'LGPL-3',
}
| 33.333333 | 700 |
3,352 |
py
|
PYTHON
|
15.0
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo
import odoo.tests
import unittest
class BaseTestUi(odoo.tests.HttpCase):
def main_flow_tour(self):
# Enable Make to Order
self.env.ref('stock.route_warehouse0_mto').active = True
# Define minimal accounting data to run without CoA
a_expense = self.env['account.account'].create({
'code': 'X2120',
'name': 'Expenses - (test)',
'user_type_id': self.env.ref('account.data_account_type_expenses').id,
})
a_recv = self.env['account.account'].create({
'code': 'X1012',
'name': 'Debtors - (test)',
'reconcile': True,
'user_type_id': self.env.ref('account.data_account_type_receivable').id,
})
a_pay = self.env['account.account'].create({
'code': 'X1111',
'name': 'Creditors - (test)',
'user_type_id': self.env.ref('account.data_account_type_payable').id,
'reconcile': True,
})
a_sale = self.env['account.account'].create({
'code': 'X2020',
'name': 'Product Sales - (test)',
'user_type_id': self.env.ref('account.data_account_type_revenue').id,
})
bnk = self.env['account.account'].create({
'code': 'X1014',
'name': 'Bank Current Account - (test)',
'user_type_id': self.env.ref('account.data_account_type_liquidity').id,
})
Property = self.env['ir.property']
Property._set_default('property_account_receivable_id', 'res.partner', a_recv, self.env.company)
Property._set_default('property_account_payable_id', 'res.partner', a_pay, self.env.company)
Property._set_default('property_account_position_id', 'res.partner', False, self.env.company)
Property._set_default('property_account_expense_categ_id', 'product.category', a_expense, self.env.company)
Property._set_default('property_account_income_categ_id', 'product.category', a_sale, self.env.company)
self.expenses_journal = self.env['account.journal'].create({
'name': 'Vendor Bills - Test',
'code': 'TEXJ',
'type': 'purchase',
'refund_sequence': True,
})
self.bank_journal = self.env['account.journal'].create({
'name': 'Bank - Test',
'code': 'TBNK',
'type': 'bank',
'default_account_id': bnk.id,
})
self.sales_journal = self.env['account.journal'].create({
'name': 'Customer Invoices - Test',
'code': 'TINV',
'type': 'sale',
'default_account_id': a_sale.id,
'refund_sequence': True,
})
self.start_tour("/web", 'main_flow_tour', login="admin", timeout=180)
@odoo.tests.tagged('post_install', '-at_install')
class TestUi(BaseTestUi):
def test_01_main_flow_tour(self):
self.main_flow_tour()
@odoo.tests.tagged('post_install', '-at_install')
class TestUiMobile(BaseTestUi):
browser_size = '375x667'
def test_01_main_flow_tour_mobile(self):
if odoo.release.version_info[-1] == 'e':
self.main_flow_tour()
else:
raise unittest.SkipTest("Mobile testing not needed in community")
| 38.528736 | 3,352 |
228 |
py
|
PYTHON
|
15.0
|
{
'name': 'test installation of data module',
'description': 'Test data module (see test_data_module) installation',
'version': '0.0.1',
'category': 'Hidden/Tests',
'sequence': 10,
'license': 'LGPL-3',
}
| 28.5 | 228 |
506 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
from odoo.tests import common
class TestDataModuleInstalled(common.TransactionCase):
""" Test that the fake data module `test_data_module` is correctly installed.
The use case of this test is that Odoo supports installing data-only modules, i.e. modules without `__init__.py`.
"""
def test_data_module_installed(self):
data_module = self.env['ir.module.module'].search([('name', '=', 'test_data_module')])
self.assertEqual(data_module.state, 'installed')
| 38.923077 | 506 |
2,546 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class SomeObj(models.Model):
_name = 'test_access_right.some_obj'
_description = 'Object For Test Access Right'
val = fields.Integer()
categ_id = fields.Many2one('test_access_right.obj_categ')
company_id = fields.Many2one('res.company')
forbidden = fields.Integer(
groups='test_access_rights.test_group,!base.group_no_one,base.group_user,!base.group_public',
default=5
)
forbidden2 = fields.Integer(groups='test_access_rights.test_group')
forbidden3 = fields.Integer(groups=fields.NO_ACCESS)
class Container(models.Model):
_name = 'test_access_right.container'
_description = 'Test Access Right Container'
some_ids = fields.Many2many('test_access_right.some_obj', 'test_access_right_rel', 'container_id', 'some_id')
class Parent(models.Model):
_name = 'test_access_right.parent'
_description = 'Object for testing related access rights'
_inherits = {'test_access_right.some_obj': 'obj_id'}
obj_id = fields.Many2one('test_access_right.some_obj', required=True, ondelete='restrict')
class ObjCateg(models.Model):
_name = 'test_access_right.obj_categ'
_description = "Context dependent searchable model"
name = fields.Char(required=True)
def search(self, args, **kwargs):
if self.env.context.get('only_media'):
args += [('name', '=', 'Media')]
return super(ObjCateg, self).search(args, **kwargs)
class FakeTicket(models.Model):
"""We want to simulate a record that would typically be accessed by a portal user,
with a relational field to records that could not be accessed by a portal user.
"""
_name = 'test_access_right.ticket'
_description = 'Fake ticket For Test Access Right'
name = fields.Char()
message_partner_ids = fields.Many2many(comodel_name='res.partner')
class ResPartner(models.Model):
"""User inherits partner, so we are implicitly adding these fields to User
This essentially reproduces the (sad) situation introduced by account.
"""
_name = 'res.partner'
_inherit = 'res.partner'
currency_id = fields.Many2one('res.currency', compute='_get_company_currency', readonly=True)
monetary = fields.Monetary() # implicitly depends on currency_id as currency_field
def _get_company_currency(self):
for partner in self:
partner.currency_id = partner.sudo().company_id.currency_id
| 36.898551 | 2,546 |
281 |
py
|
PYTHON
|
15.0
|
{
'name': 'test of access rights and rules',
'description': "Testing of access restrictions",
'version': '0.0.1',
'category': 'Hidden/Tests',
'data': [
'ir.model.access.csv',
'security.xml',
'data.xml',
],
'license': 'LGPL-3',
}
| 23.416667 | 281 |
16,198 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
from odoo import SUPERUSER_ID, Command
from odoo.exceptions import AccessError
from odoo.tests import common, TransactionCase
class Feedback(TransactionCase):
def setUp(self):
super().setUp()
self.group0 = self.env['res.groups'].create({'name': "Group 0"})
self.group1 = self.env['res.groups'].create({'name': "Group 1"})
self.group2 = self.env['res.groups'].create({'name': "Group 2"})
self.user = self.env['res.users'].create({
'login': 'bob',
'name': "Bob Bobman",
'groups_id': [Command.set(self.group2.ids)],
})
class TestSudo(Feedback):
""" Test the behavior of method sudo(). """
def test_sudo(self):
record = self.env['test_access_right.some_obj'].create({'val': 5})
user1 = self.user
partner_demo = self.env['res.partner'].create({
'name': 'Marc Demo',
})
user2 = self.env['res.users'].create({
'login': 'demo2',
'password': 'demo2',
'partner_id': partner_demo.id,
'groups_id': [Command.set([self.env.ref('base.group_user').id, self.env.ref('base.group_partner_manager').id])],
})
# with_user(user)
record1 = record.with_user(user1)
self.assertEqual(record1.env.uid, user1.id)
self.assertFalse(record1.env.su)
record2 = record1.with_user(user2)
self.assertEqual(record2.env.uid, user2.id)
self.assertFalse(record2.env.su)
# the superuser is always in superuser mode
record3 = record2.with_user(SUPERUSER_ID)
self.assertEqual(record3.env.uid, SUPERUSER_ID)
self.assertTrue(record3.env.su)
# sudo()
surecord1 = record1.sudo()
self.assertEqual(surecord1.env.uid, user1.id)
self.assertTrue(surecord1.env.su)
surecord2 = record2.sudo()
self.assertEqual(surecord2.env.uid, user2.id)
self.assertTrue(surecord2.env.su)
surecord3 = record3.sudo()
self.assertEqual(surecord3.env.uid, SUPERUSER_ID)
self.assertTrue(surecord3.env.su)
# sudo().sudo()
surecord1 = surecord1.sudo()
self.assertEqual(surecord1.env.uid, user1.id)
self.assertTrue(surecord1.env.su)
# sudo(False)
record1 = surecord1.sudo(False)
self.assertEqual(record1.env.uid, user1.id)
self.assertFalse(record1.env.su)
record2 = surecord2.sudo(False)
self.assertEqual(record2.env.uid, user2.id)
self.assertFalse(record2.env.su)
record3 = surecord3.sudo(False)
self.assertEqual(record3.env.uid, SUPERUSER_ID)
self.assertTrue(record3.env.su)
# sudo().with_user(user)
record2 = surecord1.with_user(user2)
self.assertEqual(record2.env.uid, user2.id)
self.assertFalse(record2.env.su)
class TestACLFeedback(Feedback):
""" Tests that proper feedback is returned on ir.model.access errors
"""
def setUp(self):
super().setUp()
ACL = self.env['ir.model.access']
m = self.env['ir.model'].search([('model', '=', 'test_access_right.some_obj')])
ACL.search([('model_id', '=', m.id)]).unlink()
ACL.create({
'name': "read",
'model_id': m.id,
'group_id': self.group1.id,
'perm_read': True,
})
ACL.create({
'name': "create-and-read",
'model_id': m.id,
'group_id': self.group0.id,
'perm_read': True,
'perm_create': True,
})
self.record = self.env['test_access_right.some_obj'].create({'val': 5})
# values are in cache, clear them up for the test
ACL.flush()
ACL.invalidate_cache()
def test_no_groups(self):
""" Operation is never allowed
"""
with self.assertRaises(AccessError) as ctx:
self.record.with_user(self.user).write({'val': 10})
self.assertEqual(
ctx.exception.args[0],
"""You are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
No group currently allows this operation.
Contact your administrator to request access if necessary."""
)
def test_one_group(self):
with self.assertRaises(AccessError) as ctx:
self.env(user=self.user)['test_access_right.some_obj'].create({
'val': 1
})
self.assertEqual(
ctx.exception.args[0],
"""You are not allowed to create 'Object For Test Access Right' (test_access_right.some_obj) records.
This operation is allowed for the following groups:\n\t- Group 0
Contact your administrator to request access if necessary."""
)
def test_two_groups(self):
r = self.record.with_user(self.user)
expected = """You are not allowed to access 'Object For Test Access Right' (test_access_right.some_obj) records.
This operation is allowed for the following groups:\n\t- Group 0\n\t- Group 1
Contact your administrator to request access if necessary."""
with self.assertRaises(AccessError) as ctx:
# noinspection PyStatementEffect
r.val
self.assertEqual(ctx.exception.args[0], expected)
with self.assertRaises(AccessError) as ctx:
r.read(['val'])
self.assertEqual(ctx.exception.args[0], expected)
class TestIRRuleFeedback(Feedback):
""" Tests that proper feedback is returned on ir.rule errors
"""
def setUp(self):
super().setUp()
self.model = self.env['ir.model'].search([('model', '=', 'test_access_right.some_obj')])
self.record = self.env['test_access_right.some_obj'].create({
'val': 0,
}).with_user(self.user)
def _make_rule(self, name, domain, global_=False, attr='write'):
res = self.env['ir.rule'].create({
'name': name,
'model_id': self.model.id,
'groups': [] if global_ else [Command.link(self.group2.id)],
'domain_force': domain,
'perm_read': False,
'perm_write': False,
'perm_create': False,
'perm_unlink': False,
'perm_' + attr: True,
})
return res
def test_local(self):
self._make_rule('rule 0', '[("val", "=", 42)]')
with self.assertRaises(AccessError) as ctx:
self.record.write({'val': 1})
self.assertEqual(
ctx.exception.args[0],
"""Due to security restrictions, you are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
Contact your administrator to request access if necessary.""")
# debug mode
self.env.ref('base.group_no_one').write({'users': [Command.link(self.user.id)]})
self.env.ref('base.group_user').write({'users': [Command.link(self.user.id)]})
with self.assertRaises(AccessError) as ctx:
self.record.write({'val': 1})
self.assertEqual(
ctx.exception.args[0],
"""Due to security restrictions, you are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
Records: %s (id=%s)
User: %s (id=%s)
This restriction is due to the following rules:
- rule 0
Contact your administrator to request access if necessary.""" % (self.record.display_name, self.record.id, self.user.name, self.user.id)
)
p = self.env['test_access_right.parent'].create({'obj_id': self.record.id})
with self.assertRaisesRegex(
AccessError,
r"Implicitly accessed through 'Object for testing related access rights' \(test_access_right.parent\)\.",
):
p.with_user(self.user).write({'val': 1})
def test_locals(self):
self.env.ref('base.group_no_one').write({'users': [Command.link(self.user.id)]})
self.env.ref('base.group_user').write({'users': [Command.link(self.user.id)]})
self._make_rule('rule 0', '[("val", "=", 42)]')
self._make_rule('rule 1', '[("val", "=", 78)]')
with self.assertRaises(AccessError) as ctx:
self.record.write({'val': 1})
self.assertEqual(
ctx.exception.args[0],
"""Due to security restrictions, you are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
Records: %s (id=%s)
User: %s (id=%s)
This restriction is due to the following rules:
- rule 0
- rule 1
Contact your administrator to request access if necessary.""" % (self.record.display_name, self.record.id, self.user.name, self.user.id)
)
def test_globals_all(self):
self.env.ref('base.group_no_one').write({'users': [Command.link(self.user.id)]})
self.env.ref('base.group_user').write({'users': [Command.link(self.user.id)]})
self._make_rule('rule 0', '[("val", "=", 42)]', global_=True)
self._make_rule('rule 1', '[("val", "=", 78)]', global_=True)
with self.assertRaises(AccessError) as ctx:
self.record.write({'val': 1})
self.assertEqual(
ctx.exception.args[0],
"""Due to security restrictions, you are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
Records: %s (id=%s)
User: %s (id=%s)
This restriction is due to the following rules:
- rule 0
- rule 1
Contact your administrator to request access if necessary.""" % (self.record.display_name, self.record.id, self.user.name, self.user.id)
)
def test_globals_any(self):
""" Global rules are AND-ed together, so when an access fails it
might be just one of the rules, and we want an exact listing
"""
self.env.ref('base.group_no_one').write({'users': [Command.link(self.user.id)]})
self.env.ref('base.group_user').write({'users': [Command.link(self.user.id)]})
self._make_rule('rule 0', '[("val", "=", 42)]', global_=True)
self._make_rule('rule 1', '[(1, "=", 1)]', global_=True)
with self.assertRaises(AccessError) as ctx:
self.record.write({'val': 1})
self.assertEqual(
ctx.exception.args[0],
"""Due to security restrictions, you are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
Records: %s (id=%s)
User: %s (id=%s)
This restriction is due to the following rules:
- rule 0
Contact your administrator to request access if necessary.""" % (self.record.display_name, self.record.id, self.user.name, self.user.id)
)
def test_combination(self):
self.env.ref('base.group_no_one').write({'users': [Command.link(self.user.id)]})
self.env.ref('base.group_user').write({'users': [Command.link(self.user.id)]})
self._make_rule('rule 0', '[("val", "=", 42)]', global_=True)
self._make_rule('rule 1', '[(1, "=", 1)]', global_=True)
self._make_rule('rule 2', '[(0, "=", 1)]')
self._make_rule('rule 3', '[("val", "=", 55)]')
with self.assertRaises(AccessError) as ctx:
self.record.write({'val': 1})
self.assertEqual(
ctx.exception.args[0],
"""Due to security restrictions, you are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
Records: %s (id=%s)
User: %s (id=%s)
This restriction is due to the following rules:
- rule 0
- rule 2
- rule 3
Contact your administrator to request access if necessary.""" % (self.record.display_name, self.record.id, self.user.name, self.user.id)
)
def test_warn_company(self):
""" If one of the failing rules mentions company_id, add a note that
this might be a multi-company issue.
"""
self.env.ref('base.group_no_one').write({'users': [Command.link(self.user.id)]})
self.env.ref('base.group_user').write({'users': [Command.link(self.user.id)]})
self._make_rule('rule 0', "[('company_id', '=', user.company_id.id)]")
self._make_rule('rule 1', '[("val", "=", 0)]', global_=True)
with self.assertRaises(AccessError) as ctx:
self.record.write({'val': 1})
self.assertEqual(
ctx.exception.args[0],
"""Due to security restrictions, you are not allowed to modify 'Object For Test Access Right' (test_access_right.some_obj) records.
Records: %s (id=%s)
User: %s (id=%s)
This restriction is due to the following rules:
- rule 0
Note: this might be a multi-company issue.
Contact your administrator to request access if necessary.""" % (self.record.display_name, self.record.id, self.user.name, self.user.id)
)
def test_read(self):
""" because of prefetching, read() goes through a different codepath
to apply rules
"""
self.env.ref('base.group_no_one').write({'users': [Command.link(self.user.id)]})
self.env.ref('base.group_user').write({'users': [Command.link(self.user.id)]})
self._make_rule('rule 0', "[('company_id', '=', user.company_id.id)]", attr='read')
self._make_rule('rule 1', '[("val", "=", 1)]', global_=True, attr='read')
with self.assertRaises(AccessError) as ctx:
_ = self.record.val
self.assertEqual(
ctx.exception.args[0],
"""Due to security restrictions, you are not allowed to access 'Object For Test Access Right' (test_access_right.some_obj) records.
Records: %s (id=%s)
User: %s (id=%s)
This restriction is due to the following rules:
- rule 0
- rule 1
Note: this might be a multi-company issue.
Contact your administrator to request access if necessary.""" % (self.record.display_name, self.record.id, self.user.name, self.user.id)
)
p = self.env['test_access_right.parent'].create({'obj_id': self.record.id})
p.flush()
p.invalidate_cache()
with self.assertRaisesRegex(
AccessError,
r"Implicitly accessed through 'Object for testing related access rights' \(test_access_right.parent\)\.",
):
p.with_user(self.user).val
class TestFieldGroupFeedback(Feedback):
def setUp(self):
super().setUp()
self.record = self.env['test_access_right.some_obj'].create({
'val': 0,
}).with_user(self.user)
def test_read(self):
self.env.ref('base.group_no_one').write(
{'users': [Command.link(self.user.id)]})
with self.assertRaises(AccessError) as ctx:
_ = self.record.forbidden
self.assertEqual(
ctx.exception.args[0],
"""The requested operation can not be completed due to security restrictions.
Document type: Object For Test Access Right (test_access_right.some_obj)
Operation: read
User: %s
Fields:
- forbidden (allowed for groups 'User types / Internal User', 'Test Group'; forbidden for groups 'Extra Rights / Technical Features', 'User types / Public')"""
% self.user.id
)
with self.assertRaises(AccessError) as ctx:
_ = self.record.forbidden3
self.assertEqual(
ctx.exception.args[0],
"""The requested operation can not be completed due to security restrictions.
Document type: Object For Test Access Right (test_access_right.some_obj)
Operation: read
User: %s
Fields:
- forbidden3 (always forbidden)""" % self.user.id
)
def test_write(self):
self.env.ref('base.group_no_one').write(
{'users': [Command.link(self.user.id)]})
with self.assertRaises(AccessError) as ctx:
self.record.write({'forbidden': 1, 'forbidden2': 2})
self.assertEqual(
ctx.exception.args[0],
"""The requested operation can not be completed due to security restrictions.
Document type: Object For Test Access Right (test_access_right.some_obj)
Operation: write
User: %s
Fields:
- forbidden (allowed for groups 'User types / Internal User', 'Test Group'; forbidden for groups 'Extra Rights / Technical Features', 'User types / Public')
- forbidden2 (allowed for groups 'Test Group')"""
% self.user.id
)
| 38.112941 | 16,198 |
5,498 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.exceptions import AccessError
from odoo.tests.common import TransactionCase
from odoo.tools import mute_logger
from odoo import Command
class TestRules(TransactionCase):
def setUp(self):
super(TestRules, self).setUp()
ObjCateg = self.env['test_access_right.obj_categ']
SomeObj = self.env['test_access_right.some_obj']
self.categ1 = ObjCateg.create({'name': 'Food'}).id
self.id1 = SomeObj.create({'val': 1, 'categ_id': self.categ1}).id
self.id2 = SomeObj.create({'val': -1, 'categ_id': self.categ1}).id
# create a global rule forbidding access to records with a negative
# (or zero) val
self.env['ir.rule'].create({
'name': 'Forbid negatives',
'model_id': self.browse_ref('test_access_rights.model_test_access_right_some_obj').id,
'domain_force': "[('val', '>', 0)]"
})
# create a global rule that forbids access to records without
# a category; the search itself is part of the test
self.env['ir.rule'].create({
'name': 'See all categories',
'model_id': self.browse_ref('test_access_rights.model_test_access_right_some_obj').id,
'domain_force': "[('categ_id', 'in', user.env['test_access_right.obj_categ'].search([]).ids)]"
})
@mute_logger('odoo.addons.base.models.ir_rule')
def test_basic_access(self):
env = self.env(user=self.browse_ref('base.public_user'))
# put forbidden record in cache
browse2 = env['test_access_right.some_obj'].browse(self.id2)
# this is the one we want
browse1 = env['test_access_right.some_obj'].browse(self.id1)
# this should not blow up
self.assertEqual(browse1.val, 1)
# but this should
browse1.invalidate_cache(['val'])
with self.assertRaises(AccessError):
self.assertEqual(browse2.val, -1)
@mute_logger('odoo.addons.base.models.ir_rule')
def test_group_rule(self):
env = self.env(user=self.browse_ref('base.public_user'))
# we forbid access to the public group, to which the public user belongs
self.env['ir.rule'].create({
'name': 'Forbid public group',
'model_id': self.browse_ref('test_access_rights.model_test_access_right_some_obj').id,
'groups': [Command.set([self.browse_ref('base.group_public').id])],
'domain_force': "[(0, '=', 1)]"
})
browse2 = env['test_access_right.some_obj'].browse(self.id2)
browse1 = env['test_access_right.some_obj'].browse(self.id1)
# everything should blow up
(browse1 + browse2).invalidate_cache(['val'])
with self.assertRaises(AccessError):
self.assertEqual(browse2.val, -1)
with self.assertRaises(AccessError):
self.assertEqual(browse1.val, 1)
def test_many2many(self):
""" Test assignment of many2many field where rules apply. """
ids = [self.id1, self.id2]
# create container as superuser, connected to all some_objs
container_admin = self.env['test_access_right.container'].create({'some_ids': [Command.set(ids)]})
self.assertItemsEqual(container_admin.some_ids.ids, ids)
# check the container as the public user
container_user = container_admin.with_user(self.browse_ref('base.public_user'))
container_user.invalidate_cache(['some_ids'])
self.assertItemsEqual(container_user.some_ids.ids, [self.id1])
# this should not fail
container_user.write({'some_ids': [Command.set(ids)]})
container_user.invalidate_cache(['some_ids'])
self.assertItemsEqual(container_user.some_ids.ids, [self.id1])
container_admin.invalidate_cache(['some_ids'])
self.assertItemsEqual(container_admin.some_ids.ids, ids)
# this removes all records
container_user.write({'some_ids': [Command.clear()]})
container_user.invalidate_cache(['some_ids'])
self.assertItemsEqual(container_user.some_ids.ids, [])
container_admin.invalidate_cache(['some_ids'])
self.assertItemsEqual(container_admin.some_ids.ids, [])
def test_access_rule_performance(self):
env = self.env(user=self.browse_ref('base.public_user'))
Model = env['test_access_right.some_obj']
with self.assertQueryCount(0):
Model._filter_access_rules('read')
def test_no_context_in_ir_rules(self):
""" The context should not impact the ir rules. """
env = self.env(user=self.browse_ref('base.public_user'))
ObjCateg = self.env['test_access_right.obj_categ']
SomeObj = self.env['test_access_right.some_obj']
# validate the effect of context on the category search; there is
# no existing Media category
self.assertTrue(ObjCateg.search([]))
self.assertFalse(ObjCateg.with_context(only_media=True).search([]))
# record1 is in the Food category and is accessible with an empty context
ObjCateg.clear_caches()
records = SomeObj.search([('id', '=', self.id1)])
self.assertTrue(records)
# it should also be accessible as the context is not used when
# searching for SomeObjs
ObjCateg.clear_caches()
records = SomeObj.with_context(only_media=True).search([('id', '=', self.id1)])
self.assertTrue(records)
| 43.634921 | 5,498 |
1,824 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.base.tests.common import TransactionCaseWithUserDemo
class TestMonetaryAccess(TransactionCaseWithUserDemo):
def test_monetary_access_create(self):
"""Monetary fields that depend on compute/related currency
have never really been supported by the ORM.
However most currency fields are related.
This limitation can cause monetary fields to not be rounded,
as well as trigger spurious ACL errors.
"""
user_admin = self.env.ref("base.user_admin")
user_demo = self.user_demo.with_user(user_admin)
# this would raise without the fix introduced in this commit
new_user = user_demo.copy({'monetary': 1/3})
new_user.partner_id.company_id = new_user.company_id
# The following is here to document how the ORM behaves, not really part of the test;
# in particular these specific points highlight the discrepancy between what is sent
# to the database and what we get on the ORM side.
# (to be fair, these are pre-existing ORM limitations that should have been avoided
# by using more careful field definitions and testing)
self.assertEqual(new_user.currency_id.id, False,
"The cache contains the wrong value for currency.")
self.assertEqual(new_user.monetary, 1/3,
"Because of previous point, no rounding was done.")
new_user.invalidate_cache()
self.assertEqual(new_user.currency_id.rounding, 0.01,
"We now get the correct currency.")
self.assertEqual(new_user.monetary, 0.33,
"The value was rounded when added to the cache.")
| 48 | 1,824 |
2,630 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo.tests
from odoo import Command
@odoo.tests.tagged('-at_install', 'post_install')
class TestAccess(odoo.tests.HttpCase):
def setUp(self):
super(TestAccess, self).setUp()
self.portal_user = self.env['res.users'].create({
'login': 'P',
'name': 'P',
'groups_id': [Command.set([self.env.ref('base.group_portal').id])],
})
# a partner that can't be read by the portal user; it would typically be an internal user's partner
self.internal_user_partner = self.env['res.partner'].create({'name': 'I'})
self.document = self.env['test_access_right.ticket'].create({
'name': 'Need help here',
'message_partner_ids': [Command.set([self.portal_user.partner_id.id,
self.internal_user_partner.id])],
})
def test_check_access(self):
"""Typically, a document consulted by a portal user P
will point to other records that P cannot read.
For example, if P wants to consult a ticket of his,
the ticket will have a reviewer or assigned user that is internal,
whose partner cannot be read by P.
This should not block P from accessing the ticket.
"""
document = self.document.with_user(self.portal_user)
# at this point, some fields might already be loaded in cache.
# if so, it means we would bypass the ACL when trying to read the field
# while this is bad, it is not what this test is about
self.internal_user_partner.invalidate_cache(fnames=['active'])
# from portal's _document_check_access:
document.check_access_rights('read')
document.check_access_rule('read')
# no raise, because we are supposed to be able to read our ticket
def test_name_search_with_sudo(self):
"""Check that _name_search returns correct values with sudo
"""
no_access_user = self.env['res.users'].create({
'login': 'no_access',
'name': 'no_access',
'groups_id': [Command.clear()],
})
document = self.env['test_access_right.ticket'].with_user(no_access_user)
res = document.sudo().name_search('Need help here')
# Invalidate the cache in case the name is already there
# and would not trigger check_access_rights when
# name_get accesses the name
self.document.invalidate_cache(fnames=['name'])
self.assertEqual(res[0][1], "Need help here")
| 45.344828 | 2,630 |
205 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
{
'name': 'test mimetypes-guessing',
'version': '0.1',
'category': 'Hidden/Tests',
'description': """A module to generate exceptions.""",
'license': 'LGPL-3',
}
| 25.625 | 205 |
1,885 |
py
|
PYTHON
|
15.0
|
# -*- coding: utf-8 -*-
import os.path
from odoo.tests.common import BaseCase
from odoo.tools.mimetypes import guess_mimetype
def contents(extension):
with open(os.path.join(
os.path.dirname(__file__),
'testfiles',
'case.{}'.format(extension)
), 'rb') as f:
return f.read()
class TestMimeGuessing(BaseCase):
def test_doc(self):
self.assertEqual(
guess_mimetype(contents('doc')),
'application/msword'
)
def test_xls(self):
self.assertEqual(
guess_mimetype(contents('xls')),
'application/vnd.ms-excel'
)
def test_docx(self):
self.assertEqual(
guess_mimetype(contents('docx')),
'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
)
def test_xlsx(self):
self.assertEqual(
guess_mimetype(contents('xlsx')),
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
)
def test_odt(self):
self.assertEqual(
guess_mimetype(contents('odt')),
'application/vnd.oasis.opendocument.text'
)
def test_ods(self):
self.assertEqual(
guess_mimetype(contents('ods')),
'application/vnd.oasis.opendocument.spreadsheet'
)
def test_zip(self):
self.assertEqual(
guess_mimetype(contents('zip')),
'application/zip'
)
def test_gif(self):
self.assertEqual(
guess_mimetype(contents('gif')),
'image/gif'
)
def test_jpeg(self):
self.assertEqual(
guess_mimetype(contents('jpg')),
'image/jpeg'
)
def test_unknown(self):
self.assertEqual(
guess_mimetype(contents('csv')),
'application/octet-stream'
)
| 27.318841 | 1,885 |