size (int64, 0-304k) | ext (stringclasses, 1 value) | lang (stringclasses, 1 value) | branch (stringclasses, 1 value) | content (stringlengths, 0-304k) | avg_line_length (float64, 0-238) | max_line_length (int64, 0-304k) |
---|---|---|---|---|---|---|
2,123 | py | PYTHON | 15.0 |
# Copyright 2020 Tecnativa - Ernesto Tejeda
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo.exceptions import ValidationError
from odoo.tests import TransactionCase
class TestProductCategoryActive(TransactionCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
categ_obj = cls.env["product.category"]
product_obj = cls.env["product.template"]
cls.parent_categ = categ_obj.create({"name": "Parent category"})
cls.child_1 = categ_obj.create(
{"name": "child 1", "parent_id": cls.parent_categ.id}
)
cls.child_2 = categ_obj.create(
{"name": "child 2", "parent_id": cls.parent_categ.id}
)
cls.product_1 = product_obj.create({"name": "Product 1"})
def test_dont_archive_non_empty_categories(self):
self.assertTrue(self.child_1.active)
self.assertTrue(self.child_2.active)
self.assertTrue(self.parent_categ.active)
self.product_1.categ_id = self.child_1.id
with self.assertRaises(ValidationError):
self.parent_categ.active = False
with self.assertRaises(ValidationError):
(self.child_1 | self.child_2).write({"active": False})
with self.assertRaises(ValidationError):
self.child_1.active = False
def test_archive_empty_categories(self):
self.assertTrue(self.child_1.active)
self.assertTrue(self.parent_categ.active)
self.child_1.active = False
self.parent_categ.active = False
self.assertFalse(self.child_1.active)
self.assertFalse(self.parent_categ.active)
def test_archive_categories_with_inactive_products(self):
self.assertTrue(self.child_1.active)
self.assertTrue(self.child_2.active)
self.assertTrue(self.parent_categ.active)
self.product_1.categ_id = self.child_1.id
self.product_1.active = False
with self.assertRaises(ValidationError):
self.parent_categ.active = False
with self.assertRaises(ValidationError):
self.child_1.active = False
| 40.826923 | 2,123 |
978 | py | PYTHON | 15.0 |
# Copyright 2020 Tecnativa - Ernesto Tejeda
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class ProductCategory(models.Model):
_inherit = "product.category"
active = fields.Boolean(
default=True,
help="If unchecked, it will allow you to hide the "
"product category without removing it.",
)
@api.constrains("active")
def _check_archive(self):
to_archive = self.filtered(lambda r: not r.active)
if (
self.env["product.template"]
.with_context(active_test=False)
.search([("categ_id", "child_of", to_archive.ids)])
):
raise ValidationError(
_(
"At least one category that you are trying to archive or one "
"of its children has one or more product linked to it."
)
)
| 32.6 | 978 |
578 | py | PYTHON | 15.0 |
# Copyright 2020 Vauxoo, S.A. de C.V.
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl).
{
"name": "Product Order No Name",
"summary": "Speedup product retrieve",
"version": "15.0.1.0.0",
"development_status": "Production/Stable",
"category": "Technical Settings",
"website": "https://github.com/OCA/product-attribute",
"author": "Vauxoo, Odoo Community Association (OCA)",
"maintainers": ["WR-96", "moylop260", "luisg123v"],
"license": "LGPL-3",
"depends": ["product"],
"application": False,
"installable": True,
}
| 36.125 | 578 |
192 | py | PYTHON | 15.0 |
from odoo import fields, models
class ProductTemplate(models.Model):
_inherit = "product.template"
_order = "priority desc, default_code"
default_code = fields.Char(index=True)
| 24 | 192 |
142 | py | PYTHON | 15.0 |
from odoo import models
class ProductProduct(models.Model):
_inherit = "product.product"
_order = "priority desc, default_code, id"
| 23.666667 | 142 |
760 | py | PYTHON | 15.0 |
# Copyright 2017 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
{
"name": "Product State",
"summary": """
Module introducing a state field on product template""",
"author": "ACSONE SA/NV, Odoo Community Association (OCA)",
"website": "https://github.com/OCA/product-attribute",
"category": "Product",
"version": "15.0.1.1.0",
"license": "AGPL-3",
"depends": ["product", "sale"],
"data": [
"data/product_state_data.xml",
"security/ir.model.access.csv",
"security/ir.model.access.csv",
"views/product_template.xml",
],
"application": False,
"maintainers": ["emagdalenaC2i"],
"post_init_hook": "post_init_hook",
}
| 34.545455 | 760 |
2,938 | py | PYTHON | 15.0 |
# Copyright 2021 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
import logging
from odoo.exceptions import UserError, ValidationError
from odoo.tests.common import TransactionCase
_logger = logging.getLogger(__name__)
class TestProductState(TransactionCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.ProductState = cls.env["product.state"]
cls.state = cls.ProductState.create({"name": "State Name", "code": "Code"})
cls.product_obj = cls.env["product.template"]
cls.product_1 = cls.env.ref("product.product_product_4_product_template")
@classmethod
def _create_product(cls, state=None):
vals = {
"name": "Product Test for State",
}
if state:
vals.update({"product_state_id": state.id})
cls.product = cls.product_obj.create(vals)
def test_01_product_state(self):
"""
Check if existing product has the default value (see init_hook)
Check if new state has no products
Create a product, check if it has the default state
Create a product with state
Check if new state has 1 product
"""
self.assertTrue(self.product_1.product_state_id)
self.assertFalse(
self.state.products_count,
)
self._create_product()
self.assertEqual(
self.env.ref("product_state.product_state_sellable"),
self.product.product_state_id,
)
self._create_product(self.state)
self.assertEqual(
self.state,
self.product.product_state_id,
)
self.assertEqual(
1,
self.state.products_count,
)
def test_02_set_product_state(self):
"""
Create product, it has default state
Then, update the state
It should have the existing one (Code)
"""
self._create_product()
self.assertEqual(
self.env.ref("product_state.product_state_sellable"),
self.product.product_state_id,
)
self.product.state = "Code"
self.assertEqual(
self.state,
self.product.product_state_id,
)
def test_03_set_constrains_product_state(self):
"""
Create another default state,
It should have the existing only one default state at time
"""
with self.assertRaises(ValidationError) as cm:
self.env["product.state"].create(
{"name": "Default State 2", "code": "df2", "default": True}
)
wn_expect = cm.exception.args[0]
self.assertEqual("There should be only one default state", wn_expect)
def test_04_invalid_state(self):
self._create_product()
with self.assertRaises(UserError):
self.product.state = "new_code"
| 33.011236 | 2,938 |
1,762 | py | PYTHON | 15.0 |
# Copyright 2017 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class ProductState(models.Model):
_name = "product.state"
_description = "Product State"
_order = "sequence, id"
name = fields.Char(string="State Name", required=True, translate=True)
code = fields.Char(string="State Code", required=True)
sequence = fields.Integer(help="Used to order the States", default=25)
active = fields.Boolean(default=True)
description = fields.Text(translate=True)
product_ids = fields.One2many(
comodel_name="product.template",
inverse_name="product_state_id",
string="State Products",
)
products_count = fields.Integer(
string="Number of products",
compute="_compute_products_count",
)
default = fields.Boolean("Default state")
_sql_constraints = [
("code_unique", "UNIQUE(code)", "Product State Code must be unique.")
]
@api.depends("product_ids")
def _compute_products_count(self):
data = self.env["product.template"].read_group(
[("product_state_id", "in", self.ids)],
["product_state_id"],
["product_state_id"],
)
mapped_data = {
record["product_state_id"][0]: record["product_state_id_count"]
for record in data
}
for state in self:
state.products_count = mapped_data.get(state.id, 0)
@api.constrains("default")
def _check_default(self):
if self.search_count([("default", "=", True)]) > 1:
raise ValidationError(_("There should be only one default state"))
| 35.959184 | 1,762 |
2,166 | py | PYTHON | 15.0 |
# Copyright 2017-2021 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import _, api, fields, models
from odoo.exceptions import UserError
class ProductTemplate(models.Model):
_inherit = "product.template"
state = fields.Char(
string="Product Status",
index=True,
compute="_compute_product_state",
inverse="_inverse_product_state",
readonly=True,
store=True,
)
product_state_id = fields.Many2one(
comodel_name="product.state",
string="State",
help="Select a state for this product",
group_expand="_read_group_state_id",
inverse="_inverse_product_state_id",
default=lambda self: self._get_default_product_state().id,
index=True,
tracking=10,
)
def _inverse_product_state_id(self):
"""
Hook to ease triggering other behaviour when the product state
changes, without requiring a write()
"""
@api.model
def _get_default_product_state(self):
return self.env["product.state"].search([("default", "=", True)], limit=1)
@api.depends("product_state_id")
def _compute_product_state(self):
for product_tmpl in self:
product_tmpl.state = product_tmpl.product_state_id.code
def _inverse_product_state(self):
for product_tmpl in self:
self._set_product_state_id(product_tmpl)
# This method can be called by variant so the record is either
# product.template or product.product
@api.model
def _set_product_state_id(self, record):
"""The record param is for similar state field at product.product model."""
ProductState = record.env["product.state"]
product_state = ProductState.search([("code", "=", record.state)], limit=1)
if record.state and not product_state:
msg = _("The product state code %s could not be found.")
raise UserError(msg % record.state)
record.product_state_id = product_state.id
@api.model
def _read_group_state_id(self, states, domain, order):
return states.search([])
| 33.84375 | 2,166 |
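The empty `_inverse_product_state_id()` hook above is an extension point: a depending module can override it to react when `product_state_id` changes without having to patch `write()`. A minimal sketch of such an override, assuming a hypothetical extending module that adds its own `end_of_life_date` field and a state whose code is "obsolete" (both names are illustrative, not part of this repository):

# Hypothetical extension module: react to product state changes via the hook.
from odoo import fields, models


class ProductTemplate(models.Model):
    _inherit = "product.template"

    # Illustrative field added by the hypothetical extension, not by product_state.
    end_of_life_date = fields.Date()

    def _inverse_product_state_id(self):
        super()._inverse_product_state_id()
        for template in self:
            # "obsolete" is an assumed state code, used only for illustration.
            if template.product_state_id.code == "obsolete":
                template.end_of_life_date = fields.Date.today()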
418 | py | PYTHON | 15.0 |
# Copyright 2020 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Product Category Product Link",
"summary": """
Allows to get products from a category""",
"version": "15.0.1.0.0",
"license": "AGPL-3",
"author": "ACSONE SA/NV,Odoo Community Association (OCA)",
"website": "https://github.com/OCA/product-attribute",
"depends": ["product"],
}
| 32.153846 | 418 |
320 | py | PYTHON | 15.0 |
# Copyright 2021 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class ProductCategory(models.Model):
_inherit = "product.category"
product_template_ids = fields.One2many(
"product.template", "categ_id", string="Products Templates"
)
| 24.615385 | 320 |
260 | py | PYTHON | 15.0 |
# Copyright 2021 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class ProductTemplate(models.Model):
_inherit = "product.template"
categ_id = fields.Many2one(
index=True,
)
| 20 | 260 |
803 | py | PYTHON | 15.0 |
# © 2016 Antiun Ingeniería S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import logging
_logger = logging.getLogger(__name__)
try:
from odoo.addons.base_multi_image.hooks import (
pre_init_hook_for_submodules,
uninstall_hook_for_submodules,
)
except ImportError:
_logger.info("Cannot import base_multi_image hooks")
def pre_init_hook(cr):
pre_init_hook_for_submodules(cr, "product.template", "image_1920")
pre_init_hook_for_submodules(cr, "product.product", "image_variant_1920")
def uninstall_hook(cr, registry):
"""Remove multi images for models that no longer use them."""
uninstall_hook_for_submodules(cr, registry, "product.template")
uninstall_hook_for_submodules(cr, registry, "product.product")
| 32.04 | 801 |
885 | py | PYTHON | 15.0 |
# © 2014-2016 Pedro M. Baeza <[email protected]>
# © 2015 Antiun Ingeniería S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Multiple Images in Products",
"version": "15.0.1.0.0",
"author": "Antiun Ingeniería, "
"Tecnativa, "
"LasLabs, "
"Odoo Community Association (OCA)",
"license": "AGPL-3",
"website": "https://github.com/OCA/product-attribute",
"category": "Sales Management",
"pre_init_hook": "pre_init_hook",
"uninstall_hook": "uninstall_hook",
"depends": [
"base_multi_image",
"product",
],
"data": [
"views/image_view.xml",
"views/product_template_view.xml",
],
"installable": True,
"images": [
"images/product.png",
"images/db.png",
"images/file.png",
"images/url.png",
],
}
| 27.53125 | 881 |
8,188 | py | PYTHON | 15.0 |
# © 2016 Pedro M. Baeza <[email protected]>
# Copyright 2016 LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl-3).
from odoo.tests import common
from .. import hooks
class TestProductMultiImage(common.TransactionCase):
def setUp(self):
super().setUp()
self.transparent_image = ( # 1x1 Transparent GIF
b"R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7"
)
self.grey_image = ( # 1x1 Grey GIF
b"R0lGODlhAQABAIAAAMLCwgAAACH5BAAAAAAALAAAAAABAAEAAAICRAEAOw =="
)
self.black_image = ( # 1x1 Black GIF
b"R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="
)
self.attribute = self.env["product.attribute"].create(
{
"name": "Test attribute",
}
)
self.value_1 = self.env["product.attribute.value"].create(
{
"name": "Test value 1",
"attribute_id": self.attribute.id,
}
)
self.value_2 = self.env["product.attribute.value"].create(
{
"name": "Test value 2",
"attribute_id": self.attribute.id,
}
)
self.product_template = self.env["product.template"].create(
{
"name": "Test product",
"attribute_line_ids": [
(
0,
0,
{
"attribute_id": self.attribute.id,
"value_ids": [(6, 0, (self.value_1 + self.value_2).ids)],
},
)
],
"image_ids": [
(
0,
0,
{
"storage": "db",
"name": "Image 1",
"file_db_store": self.transparent_image,
"owner_model": "product.template",
},
),
(
0,
0,
{
"storage": "db",
"name": "Image 2",
"file_db_store": self.black_image,
"owner_model": "product.template",
},
),
],
}
)
self.product_1 = self.product_template.product_variant_ids[0]
self.product_2 = self.product_template.product_variant_ids[1]
def test_all_images(self):
self.assertEqual(len(self.product_template.image_ids), 2)
self.assertEqual(len(self.product_1.image_ids), 2)
self.assertEqual(len(self.product_2.image_ids), 2)
def test_restrict_one_image(self):
self.product_template.image_ids[0].product_variant_ids = [
(6, 0, self.product_1.ids)
]
self.assertEqual(len(self.product_1.image_ids), 2)
self.assertEqual(len(self.product_2.image_ids), 1)
self.assertEqual(self.product_1.image_1920, self.transparent_image)
self.assertEqual(self.product_2.image_1920, self.black_image)
def test_add_image_variant(self):
self.product_1.image_ids = [
(0, 0, {"storage": "db", "file_db_store": self.grey_image})
]
self.product_template.refresh()
self.assertEqual(len(self.product_template.image_ids), 3)
self.assertEqual(
self.product_template.image_ids[-1].product_variant_ids, self.product_1
)
def test_remove_image_variant(self):
self.product_1.image_ids = [(3, self.product_1.image_ids[0].id)]
self.product_template.refresh()
self.assertEqual(len(self.product_template.image_ids), 2)
self.assertEqual(
self.product_template.image_ids[0].product_variant_ids, self.product_2
)
def test_remove_image_all_variants(self):
self.product_1.image_ids = [(3, self.product_1.image_ids[0].id)]
self.product_2.image_ids = [(3, self.product_2.image_ids[0].id)]
self.product_template.refresh()
self.assertEqual(len(self.product_template.image_ids), 1)
def test_edit_image_variant(self):
text = "Test name changed"
self.product_1.image_ids[0].name = text
self.product_template.refresh()
self.assertEqual(self.product_template.image_ids[0].name, text)
def test_create_variant_afterwards(self):
"""Create a template, assign an image, and then create the variant.
Check that the images are not lost.
"""
template = self.env["product.template"].create(
{
"name": "Test 2",
"image_ids": [
(
0,
0,
{
"storage": "db",
"name": "Image 1",
"file_db_store": self.transparent_image,
"owner_model": "product.template",
},
)
],
}
)
self.assertEqual(
len(template.image_ids),
1,
"Product template did not start with singleton image_ids. "
"Got %s" % (template.image_ids,),
)
template.write(
{
"attribute_line_ids": [
(
0,
0,
{
"attribute_id": self.attribute.id,
"value_ids": [(6, 0, (self.value_1 + self.value_2).ids)],
},
)
],
}
)
self.assertEqual(
len(template.image_ids),
1,
"Product template did not retain the singleton image_ids. "
"Got %s" % (template.image_ids,),
)
for variant in template.product_variant_ids:
self.assertEqual(
len(variant.image_ids),
1,
"Product variant did not receive the image_ids. Got %s"
% (variant.image_ids,),
)
def test_remove_variant_with_image(self):
self.product_template.image_ids[0].product_variant_ids = [
(6, 0, self.product_1.ids)
]
self.product_1.unlink()
self.assertEqual(len(self.product_template.image_ids), 1)
def test_image_product_variant_count(self):
"""It should provide a total of variants related to image"""
image = self.product_1.image_ids[0]
image.product_variant_ids = [(6, 0, self.product_1.ids)]
self.assertEqual(
image.product_variant_count,
1,
)
def test_pre_init_hook_product(self):
"""It should populate the ``image_ids`` on existing product"""
product = self.env.ref("product.product_product_3")
self.assertEqual(
len(product.image_ids),
1,
)
def test_pre_init_hook_template(self):
"""It should populate the ``image_ids`` on existing template"""
product = self.env.ref("product.product_product_3_product_template")
self.assertEqual(
len(product.image_ids),
1,
)
def test_uninstall_hook_product(self):
"""It should remove ``image_ids`` associated with products"""
hooks.uninstall_hook(self.env.cr, self.registry)
images = self.env["base_multi_image.image"].search(
[("owner_model", "=", "product.product")],
)
self.assertFalse(len(images))
def test_uninstall_hook_template(self):
"""It should remove ``image_ids`` associated with templates"""
hooks.uninstall_hook(self.env.cr, self.registry)
images = self.env["base_multi_image.image"].search(
[("owner_model", "=", "product.template")],
)
self.assertFalse(len(images))
| 36.549107 | 8,187 |
396 | py | PYTHON | 15.0 |
# © 2014-2016 Pedro M. Baeza <[email protected]>
# © 2015 Antiun Ingeniería S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import models
class ProductTemplate(models.Model):
_name = "product.template"
_inherit = [_name, "base_multi_image.owner"]
# image, image_medium, image_small fields are not available since 13.0
| 32.75 | 393 |
3,427 | py | PYTHON | 15.0 |
# © 2016 Pedro M. Baeza <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl-3).
from odoo import api, fields, models
class ProductProduct(models.Model):
_name = "product.product"
_inherit = [_name, "base_multi_image.owner"]
# Make this field computed for getting only the available images
image_ids = fields.One2many(
comodel_name="base_multi_image.image",
compute="_compute_image_ids",
inverse="_inverse_image_ids",
)
# image, image_medium, image_small fields are not available since 13.0
@api.depends(
"product_tmpl_id",
"product_tmpl_id.image_ids",
"product_tmpl_id.image_ids.product_variant_ids",
)
def _compute_image_ids(self):
for product in self:
images = product.product_tmpl_id.image_ids.filtered(
lambda x: (
not x.product_variant_ids or product.id in x.product_variant_ids.ids
)
)
product.image_ids = [(6, 0, images.ids)]
if product.image_ids:
product.image_1920 = product.image_ids[0].image_main
def _inverse_image_ids(self):
for product in self:
# Remember the list of images that were before changes
previous_images = product.product_tmpl_id.image_ids.filtered(
lambda x: (
not x.product_variant_ids or product.id in x.product_variant_ids.ids
)
)
for image in product.image_ids:
if isinstance(image.id, models.NewId):
# Image added
image.owner_id = product.product_tmpl_id.id
image.owner_model = "product.template"
image.product_variant_ids = [(6, 0, product.ids)]
image.create(image._convert_to_write(image._cache))
else:
previous_images -= image
# Update existing records
image.write(image._convert_to_write(image._cache))
for image in previous_images:
# Images removed
if not image.product_variant_ids:
variants = product.product_tmpl_id.product_variant_ids
else:
variants = image.product_variant_ids
variants -= product
if not variants:
# Remove the image, as there's no variant that contains it
image.unlink()
else:
# Leave the images for the rest of the variants
image.product_variant_ids = [(6, 0, variants.ids)]
product.image_1920 = (
False if len(product.image_ids) < 1 else product.image_ids[0].image_main
)
def unlink(self):
obj = self.with_context(bypass_image_removal=True)
# Remove images that are linked only to the product variant
for product in self:
images2remove = product.image_ids.filtered(
lambda image: (
product in image.product_variant_ids
and len(image.product_variant_ids) == 1
)
)
images2remove.unlink()
# We need to pass context to super so this syntax is valid
return super(ProductProduct, obj).unlink()
| 40.785714 | 3,426 |
843 | py | PYTHON | 15.0 |
# © 2016 Pedro M. Baeza <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl-3).
from odoo import api, fields, models
class Image(models.Model):
_inherit = "base_multi_image.image"
product_variant_ids = fields.Many2many(
comodel_name="product.product",
string="Visible in these variants",
help="If you leave it empty, all variants will show this image. "
"Selecting one or several of the available variants, you "
"restrict the availability of the image to those variants.",
)
product_variant_count = fields.Integer(compute="_compute_product_variant_count")
@api.depends("product_variant_ids")
def _compute_product_variant_count(self):
for image in self:
image.product_variant_count = len(image.product_variant_ids)
| 38.272727 | 842 |
621 | py | PYTHON | 15.0 |
# Copyright 2017 Tecnativa - Carlos Dauden
# Copyright 2018 Tecnativa - Vicent Cubells
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Product Supplierinfo Revision",
"version": "15.0.1.0.0",
"category": "Product",
"website": "https://github.com/OCA/product-attribute",
"author": "Tecnativa, Odoo Community Association (OCA)",
"license": "AGPL-3",
"installable": True,
"depends": ["product"],
"data": [
"security/ir.model.access.csv",
"views/product_supplierinfo_view.xml",
"wizards/supplierinfo_duplicate_wizard_view.xml",
],
}
| 32.684211 | 621 |
1,628 | py | PYTHON | 15.0 |
# Copyright 2016 Tecnativa - Sergio Teruel
# Copyright 2018 Tecnativa - Vicent Cubells
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from datetime import datetime
from dateutil.relativedelta import relativedelta
from odoo.tests import common, tagged
@tagged("post_install", "-at_install")
class TestProductSupplierinfoRevision(common.TransactionCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.vendor = cls.env["res.partner"].create({"name": "Suplier test"})
cls.today = datetime.today()
cls.supplierinfo = cls.env["product.supplierinfo"].create(
{"name": cls.vendor.id, "price": 100.0}
)
def test_product_supplierinfo_revision(self):
# run wizard
wizard = self.env["product.supplierinfo.duplicate.wizard"].create(
{
"date_start": self.today + relativedelta(days=1),
"variation_percent": 25.0,
}
)
result = wizard.with_context(active_ids=self.supplierinfo.ids).action_apply()
self.assertEqual(result["res_model"], "product.supplierinfo")
new_supplierinfo = self.env["product.supplierinfo"].browse(
result["domain"][0][2][0]
)
self.assertEqual(
self.supplierinfo.date_end.strftime("%Y-%m-%d"),
self.today.strftime("%Y-%m-%d"),
)
self.assertEqual(
new_supplierinfo.date_start.strftime("%Y-%m-%d"),
(self.today + relativedelta(days=1)).strftime("%Y-%m-%d"),
)
self.assertAlmostEqual(new_supplierinfo.price, 125.0)
| 37 | 1,628 |
1,063 | py | PYTHON | 15.0 |
# Copyright 2017 Tecnativa - Carlos Dauden
# Copyright 2020 Tecnativa - João Marques
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class ProductSupplierinfo(models.Model):
_inherit = "product.supplierinfo"
previous_info_id = fields.Many2one(
comodel_name="product.supplierinfo",
string="Previous info",
help="Relation with previous info when duplicate line",
)
previous_price = fields.Float(
related="previous_info_id.price", string="Previous Price"
)
variation_percent = fields.Float(
compute="_compute_variation_percent",
store=True,
digits="Product Price",
string="Variation %",
)
@api.depends("price", "previous_info_id.price")
def _compute_variation_percent(self):
for line in self:
if not (line.price and line.previous_price):
line.variation_percent = 0.0
else:
line.variation_percent = (line.price / line.previous_price - 1) * 100
| 33.1875 | 1,062 |
1,560 | py | PYTHON | 15.0 |
# Copyright 2017 Tecnativa - Carlos Dauden
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from dateutil.relativedelta import relativedelta
from odoo import fields, models
class ProductSupplierInfoDuplicateWizard(models.TransientModel):
_name = "product.supplierinfo.duplicate.wizard"
_description = "Product Supplier Duplicate Wizard"
date_start = fields.Date(required=True)
date_end = fields.Date()
variation_percent = fields.Float(
digits="Product Price",
string="Variation %",
)
def action_apply(self):
Supplierinfo = self.env["product.supplierinfo"]
new_ids = list()
for item in Supplierinfo.browse(self.env.context.get("active_ids")):
new_ids.append(
item.copy(
{
"date_start": self.date_start,
"date_end": self.date_end,
"previous_info_id": item.id,
"price": item.price * (1.0 + self.variation_percent / 100.0),
}
).id
)
item.date_end = fields.Date.from_string(self.date_start) - relativedelta(
days=1
)
action = self.env["ir.actions.actions"]._for_xml_id(
"product.product_supplierinfo_type_action"
)
if len(new_ids) > 0:
action["domain"] = [("id", "in", new_ids)]
else: # pragma: no cover
action = {"type": "ir.actions.act_window_close"}
return action
| 35.454545 | 1,560 |
574 | py | PYTHON | 15.0 |
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
{
"name": "Product Manufacturer",
"version": "15.0.1.0.2",
"summary": "Adds manufacturers and attributes on the product view.",
"website": "https://github.com/OCA/product-attribute",
"author": "OpenERP SA, Odoo Community Association (OCA)",
"license": "AGPL-3",
"category": "Product",
"depends": ["product"],
"data": ["views/product_manufacturer_view.xml"],
"auto_install": False,
"installable": True,
}
| 38.266667 | 574 |
5,116 | py | PYTHON | 15.0 |
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests.common import TransactionCase
class TestProductManufacturer(TransactionCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.env = cls.env(context=dict(cls.env.context, tracking_disable=True))
cls.manufacturer_a = cls.env["res.partner"].create({"name": "Manufacturer A"})
cls.manufacturer_b = cls.env["res.partner"].create({"name": "Manufacturer B"})
cls.attr1 = cls.env["product.attribute"].create({"name": "color"})
cls.attr1_1 = cls.env["product.attribute.value"].create(
{"name": "red", "attribute_id": cls.attr1.id}
)
cls.attr1_2 = cls.env["product.attribute.value"].create(
{"name": "blue", "attribute_id": cls.attr1.id}
)
cls.product1 = cls.env["product.template"].create(
{
"name": "Test Product Manufacturer 1",
}
)
def test_01_product_manufacturer(self):
self.product1.update(
{
"manufacturer": self.manufacturer_a.id,
"manufacturer_pname": "Test Product A",
"manufacturer_pref": "TPA",
"manufacturer_purl": "https://www.manufacturera.com/test_product_a",
}
)
self.assertEqual(
self.product1.product_variant_id.manufacturer.id, self.manufacturer_a.id
)
self.assertEqual(
self.product1.product_variant_id.manufacturer_pname, "Test Product A"
)
self.assertEqual(self.product1.product_variant_id.manufacturer_pref, "TPA")
self.assertEqual(
self.product1.product_variant_id.manufacturer_purl,
"https://www.manufacturera.com/test_product_a",
)
def test_02_product_manufacturer(self):
self.product1.update(
{
"attribute_line_ids": [
(
0,
0,
{
"attribute_id": self.attr1.id,
"value_ids": [(6, 0, [self.attr1_1.id, self.attr1_2.id])],
},
),
],
}
)
self.product1.product_variant_ids[0].update(
{
"manufacturer": self.manufacturer_b.id,
"manufacturer_pname": "Test Product B",
"manufacturer_pref": "TPB",
"manufacturer_purl": "https://www.manufacturerb.com/test_product_b",
}
)
self.product1.product_variant_ids[1].update(
{
"manufacturer": self.manufacturer_a.id,
"manufacturer_pname": "Test Product A",
"manufacturer_pref": "TPA",
"manufacturer_purl": "https://www.manufacturera.com/test_product_a",
}
)
self.assertEqual(self.product1.manufacturer.id, False)
self.assertEqual(self.product1.manufacturer_pname, False)
self.assertEqual(self.product1.manufacturer_pref, False)
self.assertEqual(self.product1.manufacturer_purl, False)
self.assertEqual(
self.product1.product_variant_ids[1].manufacturer.id, self.manufacturer_a.id
)
self.assertEqual(
self.product1.product_variant_ids[1].manufacturer_pname, "Test Product A"
)
self.assertEqual(self.product1.product_variant_ids[1].manufacturer_pref, "TPA")
self.assertEqual(
self.product1.product_variant_ids[1].manufacturer_purl,
"https://www.manufacturera.com/test_product_a",
)
self.assertEqual(
self.product1.product_variant_ids[0].manufacturer.id, self.manufacturer_b.id
)
self.assertEqual(
self.product1.product_variant_ids[0].manufacturer_pname, "Test Product B"
)
self.assertEqual(self.product1.product_variant_ids[0].manufacturer_pref, "TPB")
self.assertEqual(
self.product1.product_variant_ids[0].manufacturer_purl,
"https://www.manufacturerb.com/test_product_b",
)
def test_03_product_manufacturer_creation(self):
new_pt = self.env["product.template"].create(
{
"name": "New Product Template",
"manufacturer": self.manufacturer_a.id,
"manufacturer_pname": "Test Product A",
"manufacturer_pref": "TPA",
"manufacturer_purl": "https://www.manufacturera.com/test_product_a",
}
)
self.assertEqual(
new_pt.product_variant_id.manufacturer.id, new_pt.manufacturer.id
)
self.assertEqual(
new_pt.product_variant_id.manufacturer_pname, new_pt.manufacturer_pname
)
self.assertEqual(
new_pt.product_variant_id.manufacturer_pref, new_pt.manufacturer_pref
)
self.assertEqual(
new_pt.product_variant_id.manufacturer_purl, new_pt.manufacturer_purl
)
| 39.658915 | 5,116 |
3,974 | py | PYTHON | 15.0 |
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
class ProductTemplate(models.Model):
_inherit = "product.template"
manufacturer = fields.Many2one(
comodel_name="res.partner",
compute="_compute_manufacturer_info",
inverse="_inverse_manufacturer_info",
store=True,
)
manufacturer_pname = fields.Char(
compute="_compute_manufacturer_info",
inverse="_inverse_manufacturer_info",
store=True,
string="Manuf. Product Name",
)
manufacturer_pref = fields.Char(
compute="_compute_manufacturer_info",
inverse="_inverse_manufacturer_info",
store=True,
string="Manuf. Product Code",
)
manufacturer_purl = fields.Char(
compute="_compute_manufacturer_info",
inverse="_inverse_manufacturer_info",
store=True,
string="Manuf. Product URL",
)
@api.depends(
"product_variant_ids",
"product_variant_ids.manufacturer",
"product_variant_ids.manufacturer_pname",
"product_variant_ids.manufacturer_pref",
"product_variant_ids.manufacturer_purl",
)
def _compute_manufacturer_info(self):
unique_variants = self.filtered(
lambda template: len(template.product_variant_ids) == 1
)
for template in unique_variants:
template.manufacturer = template.product_variant_ids.manufacturer
template.manufacturer_pname = (
template.product_variant_ids.manufacturer_pname
)
template.manufacturer_pref = template.product_variant_ids.manufacturer_pref
template.manufacturer_purl = template.product_variant_ids.manufacturer_purl
for template in self - unique_variants:
template.manufacturer = False
template.manufacturer_pname = False
template.manufacturer_pref = False
template.manufacturer_purl = False
def _inverse_manufacturer_info(self):
for template in self:
if len(template.product_variant_ids) == 1:
template.product_variant_ids.manufacturer = template.manufacturer
template.product_variant_ids.manufacturer_pname = (
template.manufacturer_pname
)
template.product_variant_ids.manufacturer_pref = (
template.manufacturer_pref
)
template.product_variant_ids.manufacturer_purl = (
template.manufacturer_purl
)
@api.model_create_multi
def create(self, vals_list):
"""Overwrite creation for rewriting manufacturer information (if set and having
only one variant), after the variant creation, that is performed in super.
TODO : when migrating in version 16.0, remove the overload of the create function
and overload instead the new function _get_related_fields_variant_template()
introduced here : https://github.com/odoo/odoo/pull/82642
"""
templates = super().create(vals_list)
for template, vals in zip(templates, vals_list):
if len(template.product_variant_ids) == 1:
related_vals = {}
if vals.get("manufacturer"):
related_vals["manufacturer"] = vals["manufacturer"]
if vals.get("manufacturer_pname"):
related_vals["manufacturer_pname"] = vals["manufacturer_pname"]
if vals.get("manufacturer_pref"):
related_vals["manufacturer_pref"] = vals["manufacturer_pref"]
if vals.get("manufacturer_purl"):
related_vals["manufacturer_purl"] = vals["manufacturer_purl"]
if related_vals:
template.write(related_vals)
return templates
| 41.831579 | 3,974 |
492 | py | PYTHON | 15.0 |
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import fields, models
class ProductProduct(models.Model):
_inherit = "product.product"
manufacturer = fields.Many2one(comodel_name="res.partner")
manufacturer_pname = fields.Char(string="Manuf. Product Name")
manufacturer_pref = fields.Char(string="Manuf. Product Code")
manufacturer_purl = fields.Char(string="Manuf. Product URL")
| 37.846154 | 492 |
639 | py | PYTHON | 15.0 |
# Copyright 2018 Tecnativa - Sergio Teruel
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
{
"name": "Product Secondary Unit",
"summary": "Set a secondary unit per product",
"version": "15.0.2.0.1",
"development_status": "Production/Stable",
"category": "Product",
"website": "https://github.com/OCA/product-attribute",
"author": "Tecnativa, Odoo Community Association (OCA)",
"license": "AGPL-3",
"application": False,
"installable": True,
"depends": ["product"],
"data": ["security/ir.model.access.csv", "views/product_views.xml"],
"maintainers": ["sergio-teruel"],
}
| 37.588235 | 639 |
1,017 | py | PYTHON | 15.0 |
# Copyright 2021 Tecnativa - Sergio Teruel
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class SecondaryUnitFake(models.Model):
_name = "secondary.unit.fake"
_inherit = "product.secondary.unit.mixin"
_description = "Secondary unit fake model for tests"
_secondary_unit_fields = {
"qty_field": "product_uom_qty",
"uom_field": "product_uom_id",
}
name = fields.Char()
product_id = fields.Many2one("product.product", "Product", readonly=True)
product_uom_qty = fields.Float(
store=True, readonly=False, compute="_compute_product_uom_qty"
)
product_uom_id = fields.Many2one("uom.uom", string="Product Unit of Measure")
@api.depends("secondary_uom_qty", "secondary_uom_id")
def _compute_product_uom_qty(self):
self._compute_helper_target_field_qty()
@api.onchange("product_uom_id")
def _onchange_product_uom(self):
self._onchange_helper_product_uom_for_secondary()
| 36.321429 | 1,017 |
5,379 | py | PYTHON | 15.0 |
# Copyright 2021 Tecnativa - Sergio Teruel
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo_test_helper import FakeModelLoader
from odoo.tests import TransactionCase
class TestProductSecondaryUnitMixin(TransactionCase, FakeModelLoader):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.loader = FakeModelLoader(cls.env, cls.__module__)
cls.loader.backup_registry()
from .models import SecondaryUnitFake
cls.loader.update_registry((SecondaryUnitFake,))
cls.product_uom_kg = cls.env.ref("uom.product_uom_kgm")
cls.product_uom_unit = cls.env.ref("uom.product_uom_unit")
cls.product_uom_dozen = cls.env.ref("uom.product_uom_dozen")
cls.product_template = cls.env["product.template"].create(
{
"name": "test",
"uom_id": cls.product_uom_kg.id,
"uom_po_id": cls.product_uom_kg.id,
"secondary_uom_ids": [
(
0,
0,
{
"code": "C5",
"name": "box 5",
"uom_id": cls.product_uom_unit.id,
"factor": 5,
},
),
(
0,
0,
{
"code": "C10",
"name": "box 10",
"uom_id": cls.product_uom_unit.id,
"factor": 10,
},
),
],
}
)
cls.secondary_unit_box_5 = cls.product_template.secondary_uom_ids[0]
cls.secondary_unit_box_10 = cls.product_template.secondary_uom_ids[1]
# Fake model which inherits from the secondary unit mixin
cls.secondary_unit_fake = cls.env["secondary.unit.fake"].create(
{
"name": "Secondary unit fake",
"product_id": cls.product_template.product_variant_ids.id,
"product_uom_id": cls.product_uom_unit.id,
}
)
@classmethod
def tearDownClass(cls):
cls.loader.restore_registry()
return super(TestProductSecondaryUnitMixin, cls).tearDownClass()
def test_product_secondary_unit_mixin(self):
fake_model = self.secondary_unit_fake
fake_model.write(
{"secondary_uom_qty": 2, "secondary_uom_id": self.secondary_unit_box_5.id}
)
self.assertEqual(fake_model.product_uom_qty, 10.0)
fake_model.write(
{"secondary_uom_qty": 2, "secondary_uom_id": self.secondary_unit_box_10.id}
)
self.assertEqual(fake_model.product_uom_qty, 20.0)
fake_model.write({"product_uom_qty": 40.0})
self.assertEqual(fake_model.secondary_uom_qty, 4)
# Test onchange helper method
fake_model.write(
{"secondary_uom_qty": 1, "secondary_uom_id": self.secondary_unit_box_10.id}
)
fake_model.flush()
fake_model.product_uom_id = self.product_uom_dozen
fake_model._onchange_helper_product_uom_for_secondary()
self.assertEqual(fake_model.secondary_uom_qty, 12)
def test_product_secondary_unit_mixin_no_uom(self):
# If secondary_uom_id is not set, product_uom_qty on the target model is
# not computed.
fake_model = self.secondary_unit_fake
fake_model.secondary_uom_qty = 23
self.assertEqual(fake_model.product_uom_qty, 0)
def test_product_secondary_unit_mixin_no_uom_onchange(self):
# If secondary_uom_id is not set, secondary_uom_qty on the source
# model is not computed.
fake_model = self.secondary_unit_fake
fake_model._onchange_helper_product_uom_for_secondary()
self.assertEqual(fake_model.secondary_uom_qty, 0)
def test_chained_compute_field(self):
"""Secondary_uom_qty has not been computed when secondary_uom_id changes"""
fake_model = self.secondary_unit_fake
fake_model.secondary_uom_qty = 2.0
fake_model.secondary_uom_id = self.secondary_unit_box_5
self.assertEqual(fake_model.product_uom_qty, 10.0)
self.assertEqual(fake_model.secondary_uom_qty, 2.0)
fake_model.secondary_uom_id = self.secondary_unit_box_10
self.assertEqual(fake_model.product_uom_qty, 20.0)
self.assertEqual(fake_model.secondary_uom_qty, 2.0)
def test_independent_type(self):
# dependent type is already tested as dependency_type by default
fake_model = self.secondary_unit_fake
fake_model.secondary_uom_id = self.secondary_unit_box_5
fake_model.secondary_uom_id.write({"dependency_type": "independent"})
fake_model.write({"secondary_uom_qty": 2})
self.assertEqual(fake_model.product_uom_qty, 0.0)
self.assertEqual(fake_model.secondary_uom_qty, 2)
fake_model.write({"product_uom_qty": 17})
self.assertEqual(fake_model.product_uom_qty, 17)
self.assertEqual(fake_model.secondary_uom_qty, 2)
fake_model.write({"secondary_uom_qty": 4})
self.assertEqual(fake_model.product_uom_qty, 17)
self.assertEqual(fake_model.secondary_uom_qty, 4)
| 41.061069 | 5,379 |
2,198 | py | PYTHON | 15.0 |
# Copyright 2018 Tecnativa - Sergio Teruel
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo.tests import TransactionCase, tagged
@tagged("post_install", "-at_install")
class TestProductSecondaryUnit(TransactionCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.product_uom_kg = cls.env.ref("uom.product_uom_kgm")
cls.product_uom_unit = cls.env.ref("uom.product_uom_unit")
cls.product = cls.env["product.template"].create(
{
"name": "test",
"uom_id": cls.product_uom_kg.id,
"uom_po_id": cls.product_uom_kg.id,
"secondary_uom_ids": [
(
0,
0,
{
"code": "A",
"name": "unit-700",
"uom_id": cls.product_uom_unit.id,
"factor": 0.7,
},
),
(
0,
0,
{
"code": "B",
"name": "unit-900",
"uom_id": cls.product_uom_unit.id,
"factor": 0.9,
},
),
],
}
)
cls.secondary_unit = cls.env["product.secondary.unit"].search(
[("product_tmpl_id", "=", cls.product.id)], limit=1
)
def test_product_secondary_unit_name(self):
self.assertEqual(self.secondary_unit.name_get()[0][1], "unit-700-0.7")
def test_product_secondary_unit_search(self):
args = [
(
"product_tmpl_id.product_variant_ids",
"in",
self.product.product_variant_ids.ids,
)
]
name_get = self.env["product.secondary.unit"].name_search(name="A", args=args)
self.assertEqual(len(name_get), 1)
name_get = self.env["product.secondary.unit"].name_search(name="X", args=args)
self.assertEqual(len(name_get), 0)
| 36.633333 | 2,198 |
5,483 | py | PYTHON | 15.0 |
# Copyright 2021 Tecnativa - Sergio Teruel
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
from odoo.tools.float_utils import float_round
class ProductSecondaryUnitMixin(models.AbstractModel):
"""
Mixin model that allows to compute a field from a secondary unit helper
An example is to extend any model in which you want to compute quantities
based on secondary units. You must add a dictionary `_secondary_unit_fields`
as class variable with the following content:
_secondary_unit_fields = {
"qty_field": "product_uom_qty",
"uom_field": "product_uom"
}
To compute ``qty_field`` on target model, you must convert the field to computed
writable (computed, stored and readonly=False), and you have to define the
compute method adding ``secondary_uom_id`` and ``secondary_uom_qty`` fields
as dependencies and calling ``self._compute_helper_target_field_qty()`` inside it.
To compute secondary units when the user changes the uom field on the target model,
you must add an onchange method on the uom field and call
``self._onchange_helper_product_uom_for_secondary()``
You can see an example in ``purchase_order_secondary_unit`` on purchase-workflow
repository.
"""
_name = "product.secondary.unit.mixin"
_description = "Product Secondary Unit Mixin"
_secondary_unit_fields = {}
@api.model
def _get_default_secondary_uom(self):
return self.env["product.template"]._get_default_secondary_uom()
secondary_uom_qty = fields.Float(
string="Secondary Qty",
digits="Product Unit of Measure",
store=True,
readonly=False,
compute="_compute_secondary_uom_qty",
)
secondary_uom_id = fields.Many2one(
comodel_name="product.secondary.unit",
string="Second unit",
ondelete="restrict",
default=_get_default_secondary_uom,
)
def _get_uom_line(self):
return self[self._secondary_unit_fields["uom_field"]]
def _get_factor_line(self):
return self.secondary_uom_id.factor * self._get_uom_line().factor
def _get_quantity_from_line(self):
return self[self._secondary_unit_fields["qty_field"]]
@api.model
def _get_secondary_uom_qty_depends(self):
if not self._secondary_unit_fields:
return []
return [self._secondary_unit_fields["qty_field"]]
@api.depends(lambda x: x._get_secondary_uom_qty_depends())
def _compute_secondary_uom_qty(self):
for line in self:
if not line.secondary_uom_id:
line.secondary_uom_qty = 0.0
continue
elif line.secondary_uom_id.dependency_type == "independent":
continue
factor = line._get_factor_line()
qty_line = line._get_quantity_from_line()
qty = float_round(
qty_line / (factor or 1.0),
precision_rounding=line.secondary_uom_id.uom_id.rounding,
)
line.secondary_uom_qty = qty
def _get_default_value_for_qty_field(self):
return self.default_get([self._secondary_unit_fields["qty_field"]]).get(
self._secondary_unit_fields["qty_field"]
)
def _compute_helper_target_field_qty(self):
"""Set the target qty field defined in model"""
default_qty_field_value = self._get_default_value_for_qty_field()
for rec in self:
if not rec.secondary_uom_id:
rec[rec._secondary_unit_fields["qty_field"]] = (
rec._origin[rec._secondary_unit_fields["qty_field"]]
or default_qty_field_value
)
continue
if rec.secondary_uom_id.dependency_type == "independent":
if rec[rec._secondary_unit_fields["qty_field"]] == 0.0:
rec[
rec._secondary_unit_fields["qty_field"]
] = default_qty_field_value
continue
# Avoid recomputing the secondary_uom_qty field when
# secondary_uom_id changes.
rec.env.remove_to_compute(
field=rec._fields["secondary_uom_qty"], records=rec
)
factor = rec._get_factor_line()
qty = float_round(
rec.secondary_uom_qty * factor,
precision_rounding=rec._get_uom_line().rounding,
)
rec[rec._secondary_unit_fields["qty_field"]] = qty
def _onchange_helper_product_uom_for_secondary(self):
"""Helper method to be called from onchange method of uom field in
target model.
"""
if not self.secondary_uom_id:
self.secondary_uom_qty = 0.0
return
elif self.secondary_uom_id.dependency_type == "independent":
return
factor = self._get_factor_line()
line_qty = self._get_quantity_from_line()
qty = float_round(
line_qty / (factor or 1.0),
precision_rounding=self.secondary_uom_id.uom_id.rounding,
)
self.secondary_uom_qty = qty
@api.model
def default_get(self, fields_list):
defaults = super().default_get(fields_list)
if self.secondary_uom_id and not self.env.context.get(
"skip_default_secondary_uom_qty", False
):
defaults["secondary_uom_qty"] = 1.0
return defaults
| 38.886525 | 5,483 |
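The mixin docstring above lists the wiring a consuming model needs (a `_secondary_unit_fields` mapping, a computed writable quantity field, and an onchange on the UoM field); the `SecondaryUnitFake` test model earlier in this listing follows exactly that recipe. A rough sketch of the same pattern on a hypothetical `sale.order.line` extension, assuming the `sale` module is installed (the module and field wiring below are illustrative, not part of this repository):

from odoo import api, fields, models


class SaleOrderLine(models.Model):
    _inherit = ["sale.order.line", "product.secondary.unit.mixin"]
    _name = "sale.order.line"
    _secondary_unit_fields = {
        "qty_field": "product_uom_qty",
        "uom_field": "product_uom",
    }

    # The target quantity becomes a stored, writable computed field.
    product_uom_qty = fields.Float(
        store=True, readonly=False, compute="_compute_product_uom_qty"
    )

    @api.depends("secondary_uom_qty", "secondary_uom_id")
    def _compute_product_uom_qty(self):
        self._compute_helper_target_field_qty()

    @api.onchange("product_uom")
    def _onchange_product_uom_for_secondary(self):
        self._onchange_helper_product_uom_for_secondary()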
688 | py | PYTHON | 15.0 |
# Copyright 2018 Tecnativa - Sergio Teruel
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class ProductTemplate(models.Model):
_inherit = "product.template"
secondary_uom_ids = fields.One2many(
comodel_name="product.secondary.unit",
inverse_name="product_tmpl_id",
string="Secondary Unit of Measure",
help="Default Secondary Unit of Measure.",
context={"active_test": False},
)
@api.model
def _get_default_secondary_uom(self):
return (
self.secondary_uom_ids
and self.secondary_uom_ids[0]
or self.secondary_uom_ids
)
| 29.913043 | 688 |
1,225 | py | PYTHON | 15.0 |
# Copyright 2023 Tecnativa - Sergio Teruel
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class ProductProduct(models.Model):
_inherit = "product.product"
secondary_uom_ids = fields.One2many(
comodel_name="product.secondary.unit",
inverse_name="product_id",
string="Secondary Unit of Measure",
help="Default Secondary Unit of Measure.",
context={"active_test": False},
compute="_compute_secondary_uom_ids",
inverse="_inverse_secondary_uom_ids",
)
@api.depends("product_tmpl_id")
def _compute_secondary_uom_ids(self):
for variant in self:
variant.secondary_uom_ids = (
variant.product_tmpl_id.secondary_uom_ids.filtered(
lambda s: s.product_id == variant or not s.product_id
)
)
def _inverse_secondary_uom_ids(self):
for variant in self:
variant.product_tmpl_id.secondary_uom_ids = (
variant.product_tmpl_id.secondary_uom_ids.filtered(
lambda s: s.product_id != variant
)
+ variant.secondary_uom_ids
)
| 35 | 1,225 |
2,294 | py | PYTHON | 15.0 |
# Copyright 2018 Tecnativa - Sergio Teruel
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class ProductSecondaryUnit(models.Model):
_name = "product.secondary.unit"
_description = "Product Secondary Unit"
name = fields.Char(required=True, translate=True)
code = fields.Char()
product_tmpl_id = fields.Many2one(
comodel_name="product.template",
string="Product Template",
required=True,
ondelete="cascade",
)
product_id = fields.Many2one(
comodel_name="product.product",
string="Product Variant",
ondelete="cascade",
)
uom_id = fields.Many2one(
comodel_name="uom.uom",
string="Secondary Unit of Measure",
required=True,
help="Default Secondary Unit of Measure.",
)
dependency_type = fields.Selection(
selection=[
("dependent", "Dependent"),
("independent", "Independent"),
],
default="dependent",
help="If dependency type is 'dependent' the factor is used "
"to compute quantity in primary unit,"
"otherwise primary and secondary unit are independent. "
"For example if you sell service"
"by package (1 unit for example) and you want to put the "
"real time (ex : 4 hours) to allows employee scheduling",
)
factor = fields.Float(
string="Secondary Unit Factor", default=1.0, digits=0, required=True
)
active = fields.Boolean(default=True)
def name_get(self):
result = []
for unit in self:
result.append(
(
unit.id,
"{unit_name}-{factor}".format(
unit_name=unit.name, factor=unit.factor
),
)
)
return result
@api.model
def name_search(self, name="", args=None, operator="ilike", limit=100):
if args is None:
args = []
units = self.search([("code", "=", name)] + args, limit=1)
if not units:
return super(ProductSecondaryUnit, self).name_search(
name=name, args=args, operator=operator, limit=limit
)
return units.name_get()
| 33.246377 | 2,294 |
772 | py | PYTHON | 15.0 |
# Copyright 2015 OdooMRP team
# Copyright 2015 AvanzOSC
# Copyright 2015-18 Tecnativa
# Copyright 2017-18 ForgeFlow
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
{
"name": "Product Supplierinfo for Customers",
"summary": "Allows to define prices for customers in the products",
"version": "15.0.1.0.1",
"development_status": "Production/Stable",
"author": "AvanzOSC, " "Tecnativa, " "Odoo Community Association (OCA)",
"website": "https://github.com/OCA/product-attribute",
"category": "Sales Management",
"license": "AGPL-3",
"depends": ["product"],
"data": [
"security/ir.model.access.csv",
"views/product_view.xml",
],
"demo": ["demo/product_demo.xml"],
"installable": True,
}
| 35.090909 | 772 |
7,199 | py | PYTHON | 15.0 |
# Copyright 2015 OdooMRP team
# Copyright 2015 AvanzOSC
# Copyright 2015 Tecnativa
# Copyright 2018 ForgeFlow
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import TransactionCase
class TestProductSupplierinfoForCustomer(TransactionCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.supplierinfo_model = cls.env["product.supplierinfo"]
cls.customerinfo_model = cls.env["product.customerinfo"]
cls.pricelist_item_model = cls.env["product.pricelist.item"]
cls.pricelist_model = cls.env["product.pricelist"]
cls.customer = cls._create_customer("customer1")
cls.unknown = cls._create_customer("customer2")
cls.product = cls.env.ref("product.product_product_4")
cls.customerinfo = cls._create_partnerinfo(
"customer", cls.customer, cls.product
)
cls.pricelist = cls.env["product.pricelist"].create(
{"name": "Test Pricelist", "currency_id": cls.env.ref("base.USD").id}
)
cls.company = cls.env.ref("base.main_company")
cls.pricelist_item = cls.env["product.pricelist.item"].create(
{
"applied_on": "1_product",
"base": "list_price",
"name": "Test Pricelist Item",
"pricelist_id": cls.pricelist.id,
"compute_price": "fixed",
"fixed_price": 100.0,
"product_tmpl_id": cls.product.product_tmpl_id.id,
}
)
@classmethod
def _create_customer(cls, name):
"""Create a Partner."""
return cls.env["res.partner"].create(
{"name": name, "email": "[email protected]", "phone": 123456}
)
@classmethod
def _create_partnerinfo(cls, supplierinfo_type, partner, product):
return cls.env["product." + supplierinfo_type + "info"].create(
{
"name": partner.id,
"product_id": product.id,
"product_code": "00001",
"price": 100.0,
}
)
def test_default_get(self):
"""checking values returned by default_get()"""
fields = ["name"]
values = self.customer.with_context(select_type=True).default_get(fields)
self.assertEqual(values["customer"], False, "Incorrect default")
def test_product_supplierinfo_for_customer(self):
cond = [("name", "=", self.customer.id)]
supplierinfos = self.supplierinfo_model.search(cond)
self.assertEqual(len(supplierinfos), 0, "Error: Supplier found in Supplierinfo")
cond = [("name", "=", self.customer.id)]
customerinfos = self.customerinfo_model.search(cond)
self.assertNotEqual(
len(customerinfos), 0, "Error: Customer not found in Supplierinfo"
)
price, rule_id = self.pricelist.get_product_price_rule(
self.product, 1, partner=self.customer
)
self.assertEqual(
rule_id, self.pricelist_item.id, "Error: Price unit not found for customer"
)
self.assertEqual(
price, 100.0, "Error: Price not found for product and customer"
)
def test_product_supplierinfo_price(self):
price = self.product._get_price_from_customerinfo(partner_id=self.customer.id)
self.assertEqual(
price, 100.0, "Error: Price not found for product and customer"
)
self.product.company_id = self.company
res = self.product.with_context(partner_id=self.customer.id).price_compute(
"partner", self.product.uom_id, self.company.currency_id, self.company
)
self.assertEqual(
res[self.product.id], 100.0, "Error: Wrong price for product and customer"
)
res = self.product.with_context(partner_id=self.unknown.id).price_compute(
"partner", self.product.uom_id, self.company.currency_id, self.company
)
self.assertEqual(
res[self.product.id], 750.0, "Error: price does not match list price"
)
def test_variant_supplierinfo_price(self):
"""
This test checks the price for a customer of a product with variants.
Create a pricelist based on partner price.
Assign a specific price for one variant (100.0) and for the template (all
other variants --> 30.0).
"""
self.piece_template = self.env["product.template"].create(
{"name": "Piece for test", "price": 10.0, "company_id": self.company.id}
)
self.large_attribute = self.env["product.attribute"].create(
{"name": "Large test", "sequence": 1}
)
self.large_125 = self.env["product.attribute.value"].create(
{
"name": "Large 125",
"attribute_id": self.large_attribute.id,
"sequence": 1,
}
)
self.large_250 = self.env["product.attribute.value"].create(
{
"name": "Large 250",
"attribute_id": self.large_attribute.id,
"sequence": 2,
}
)
self.piece_large_attribute_lines = self.env[
"product.template.attribute.line"
].create(
{
"product_tmpl_id": self.piece_template.id,
"attribute_id": self.large_attribute.id,
"value_ids": [(6, 0, [self.large_125.id, self.large_250.id])],
}
)
template = self.piece_template
product = template.product_variant_ids[0]
product_1 = template.product_variant_ids[1]
pricelist = self.env["product.pricelist"].create(
{
"name": "Test Pricelist Customer",
"currency_id": self.env.ref("base.USD").id,
}
)
self.env["product.pricelist.item"].create(
{
"applied_on": "3_global",
"base": "partner",
"name": "Test Pricelist Item",
"pricelist_id": pricelist.id,
"compute_price": "formula",
}
)
self._create_partnerinfo("customer", self.customer, product)
price_by_template = self.customerinfo_model.create(
{"name": self.customer.id, "product_tmpl_id": template.id, "price": 30.0}
)
res = product.with_context(partner_id=self.customer.id).price_compute(
"partner", product.uom_id, self.company.currency_id, self.company
)
self.assertEqual(res[product.id], 100.0)
res = product_1.with_context(partner_id=self.customer.id).price_compute(
"partner", product_1.uom_id, self.company.currency_id, self.company
)
self.assertEqual(res[product_1.id], 30.0)
# Remove template specific price, the price must be the template
# list_price
price_by_template.unlink()
res = product_1.with_context(partner_id=self.customer.id).price_compute(
"partner", product_1.uom_id, self.company.currency_id, self.company
)
self.assertEqual(res[product_1.id], 10.0)
| 41.373563 | 7,199 |
425 | py | PYTHON | 15.0 |
# Copyright 2015 OdooMRP team
# Copyright 2015 AvanzOSC
# Copyright 2015 Tecnativa
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import fields, models
class ProductPricelistItem(models.Model):
_inherit = "product.pricelist.item"
base = fields.Selection(
selection_add=[("partner", "Partner Prices on the product form")],
ondelete={"partner": "set default"},
)
| 30.357143 | 425 |
1,678 | py | PYTHON | 15.0 |
# Copyright 2019 ForgeFlow S.L.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, models
class ProductSupplierInfo(models.Model):
_inherit = "product.supplierinfo"
@api.model
def search(self, args, offset=0, limit=None, order=None, count=False):
res = super().search(args, offset=offset, limit=limit, order=order, count=count)
if (
self.env.context.get("customerinfo")
and self._name == "product.supplierinfo"
):
limit2 = limit - len(res) if limit else limit
res2 = self.env["product.customerinfo"].search(
args, offset=offset, limit=limit2, order=order, count=count
)
res2 = res2.read(list(self.env["product.supplierinfo"]._fields.keys()))
for result in res2:
res += self.env["product.supplierinfo"].new(result)
return res
def read(self, fields=None, load="_classic_read"):
if (
self.env.context.get("customerinfo")
and self._name == "product.supplierinfo"
):
has_ids = self.filtered(
lambda x: x.id in x._ids and isinstance(x.id, (int,))
)
new_ids = self.filtered(
lambda x: x.id in x._ids and not isinstance(x.id, (int,))
)
return super(ProductSupplierInfo, has_ids).read(
fields=fields, load=load
) + [
{f: x[f] for f in x._fields if (f in fields if fields else True)}
for x in new_ids
]
else:
return super().read(fields=fields, load=load)
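# Usage sketch (assumption, not part of this module's public API): the
# ``customerinfo`` context key makes supplierinfo searches transparently
# include customer pricelist lines, e.g. from a wizard or a shell session:
#     env["product.supplierinfo"].with_context(customerinfo=True).search(
#         [("product_tmpl_id", "=", template.id)]
#     )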
| 39.023256 | 1,678 |
721 |
py
|
PYTHON
|
15.0
|
# Copyright 2019 Tecnativa - Pedro M. Baeza
# Copyright 2019 ForgeFlow S.L.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import _, api, fields, models
class ProductCustomerInfo(models.Model):
_inherit = "product.supplierinfo"
_name = "product.customerinfo"
_description = "Customer Pricelist"
name = fields.Many2one(string="Customer", help="Customer of this product")
@api.model
def get_import_templates(self):
return [
{
"label": _("Import Template for Customer Pricelists"),
"template": "/product_supplierinfo_for_customer/static/xls/"
"product_customerinfo.xls",
}
]
| 32.772727 | 721 |
628 |
py
|
PYTHON
|
15.0
|
# Copyright 2015 OdooMRP team
# Copyright 2015 AvanzOSC
# Copyright 2015 Tecnativa
# Copyright 2019 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import fields, models
class ProductTemplate(models.Model):
_inherit = "product.template"
customer_ids = fields.One2many(
comodel_name="product.customerinfo",
inverse_name="product_tmpl_id",
string="Customer",
)
variant_customer_ids = fields.One2many(
comodel_name="product.customerinfo",
inverse_name="product_tmpl_id",
string="Variant Customer",
)
| 28.545455 | 628 |
5,027 |
py
|
PYTHON
|
15.0
|
# Copyright 2015 OdooMRP team
# Copyright 2015 AvanzOSC
# Copyright 2015 Tecnativa
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
import datetime
from odoo import api, models
class ProductProduct(models.Model):
_inherit = "product.product"
def name_get(self):
res = super(ProductProduct, self.with_context(customerinfo=True)).name_get()
return res
@api.model
def _name_search(
self, name="", args=None, operator="ilike", limit=100, name_get_uid=None
):
res = super()._name_search(
name, args=args, operator=operator, limit=limit, name_get_uid=name_get_uid
)
res_ids = list(res)
res_ids_len = len(res_ids)
if not limit or res_ids_len >= limit:
limit = (limit - res_ids_len) if limit else False
if (
not name
and limit
or not self._context.get("partner_id")
or res_ids_len >= limit
):
return res_ids
limit -= res_ids_len
customerinfo_ids = self.env["product.customerinfo"]._search(
[
("name", "=", self._context.get("partner_id")),
"|",
("product_code", operator, name),
("product_name", operator, name),
],
limit=limit,
access_rights_uid=name_get_uid,
)
if not customerinfo_ids:
return res_ids
res_templates = self.browse(res_ids).mapped("product_tmpl_id")
product_tmpls = (
self.env["product.customerinfo"]
.browse(customerinfo_ids)
.mapped("product_tmpl_id")
- res_templates
)
product_ids = list(
self._search(
[("product_tmpl_id", "in", product_tmpls.ids)],
limit=limit,
access_rights_uid=name_get_uid,
)
)
res_ids.extend(product_ids)
return res_ids
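    # Usage sketch (assumption, reference and partner are illustrative):
    # searching a product by the code or name the customer uses for it,
    # as stored in product.customerinfo:
    #     env["product.product"].with_context(partner_id=partner.id).name_search(
    #         "CUSTOMER-REF-001"
    #     )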
def _get_price_from_customerinfo(self, partner_id):
self.ensure_one()
if not partner_id:
return 0.0
partner = self.env["res.partner"].browse(partner_id)
customerinfo = self._select_customerinfo(partner=partner)
if customerinfo:
return customerinfo.price
return 0.0
def price_compute(self, price_type, uom=False, currency=False, company=None):
if price_type == "partner":
partner_id = self.env.context.get(
"partner_id", False
) or self.env.context.get("partner", False)
if partner_id and isinstance(partner_id, models.BaseModel):
partner_id = partner_id.id
prices = super().price_compute("list_price", uom, currency, company)
for product in self:
price = product._get_price_from_customerinfo(partner_id)
if not price:
continue
prices[product.id] = price
if not uom and self._context.get("uom"):
uom = self.env["uom.uom"].browse(self._context["uom"])
if not currency and self._context.get("currency"):
currency = self.env["res.currency"].browse(
self._context["currency"]
)
if uom:
prices[product.id] = product.uom_id._compute_price(
prices[product.id], uom
)
if currency:
date = self.env.context.get("date", datetime.datetime.now())
prices[product.id] = product.currency_id._convert(
prices[product.id], currency, company, date
)
return prices
return super().price_compute(price_type, uom, currency, company)
def _prepare_domain_customerinfo(self, params):
self.ensure_one()
partner_id = params.get("partner_id")
return [
("name", "=", partner_id),
"|",
("product_id", "=", self.id),
"&",
("product_tmpl_id", "=", self.product_tmpl_id.id),
("product_id", "=", False),
]
def _select_customerinfo(
self, partner=False, _quantity=0.0, _date=None, _uom_id=False, params=False
):
"""Customer version of the standard `_select_seller`."""
# TODO: For now it is just the function name with the same arguments, but
# it can be changed in future migrations to be more in line with the Odoo
# standard way to select supplierinfo records.
if not params:
params = dict()
params.update({"partner_id": partner.id})
domain = self._prepare_domain_customerinfo(params)
res = (
self.env["product.customerinfo"]
.search(domain)
.sorted(lambda s: (s.sequence, s.min_qty, s.price, s.id))
)
res_1 = res.sorted("product_tmpl_id")[:1]
return res_1
| 37.237037 | 5,027 |
654 |
py
|
PYTHON
|
15.0
|
# Copyright 2015 OdooMRP team
# Copyright 2015 AvanzOSC
# Copyright 2015 Tecnativa
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, models
class ResPartner(models.Model):
_inherit = "res.partner"
@api.model
def default_get(self, fields):
res = super(ResPartner, self).default_get(fields)
select_type = self.env.context.get("select_type", False)
if select_type:
res.update(
{
"customer": select_type == "customer",
"supplier": select_type == "supplier",
}
)
return res
| 29.727273 | 654 |
528 |
py
|
PYTHON
|
15.0
|
# Copyright 2020 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Product Category Code",
"summary": """
Allows defining a code on product categories""",
"version": "15.0.1.0.0",
"license": "AGPL-3",
"author": "ACSONE SA/NV,Odoo Community Association (OCA)",
"maintainers": ["rousseldenis"],
"website": "https://github.com/OCA/product-attribute",
"depends": [
"product",
],
"data": [
"views/product_category.xml",
],
}
| 27.789474 | 528 |
607 |
py
|
PYTHON
|
15.0
|
# Copyright 2021 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import odoo.tests.common as common
class TestProductCategoryCode(common.TransactionCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
vals = {
"name": "Category Test",
"code": "TEST",
}
cls.category = cls.env["product.category"].create(vals)
def test_category(self):
new_category = self.category.copy()
self.assertEqual(
"TEST-copy",
new_category.code,
)
| 26.391304 | 607 |
493 |
py
|
PYTHON
|
15.0
|
# Copyright 2021 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models
class ProductCategory(models.Model):
_inherit = "product.category"
code = fields.Char(
default="/",
index=True,
)
@api.returns("self", lambda value: value.id)
def copy(self, default=None):
default = default or {}
default.setdefault("code", self.code + _("-copy"))
return super().copy(default)
| 24.65 | 493 |
693 |
py
|
PYTHON
|
15.0
|
# Copyright 2020 ForgeFlow
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
{
"name": "Product ABC Classification",
"summary": "Includes ABC classification for inventory management",
"version": "15.0.1.0.0",
"author": "ForgeFlow, Odoo Community Association (OCA)",
"website": "https://github.com/OCA/product-attribute",
"category": "Inventory Management",
"license": "AGPL-3",
"maintainers": ["MiquelRForgeFlow"],
"depends": ["sale_stock"],
"data": [
"data/ir_cron.xml",
"security/ir.model.access.csv",
"views/abc_classification_view.xml",
"views/product_view.xml",
],
"installable": True,
}
| 34.65 | 693 |
1,018 |
py
|
PYTHON
|
15.0
|
# Copyright 2020 ForgeFlow
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
class ProductCategory(models.Model):
_inherit = "product.category"
abc_classification_profile_id = fields.Many2one("abc.classification.profile")
product_variant_ids = fields.One2many("product.product", inverse_name="categ_id")
@api.onchange("abc_classification_profile_id")
def _onchange_abc_classification_profile_id(self):
for categ in self:
for child in categ._origin.child_id:
child.abc_classification_profile_id = (
categ.abc_classification_profile_id
)
child._onchange_abc_classification_profile_id()
for variant in categ._origin.product_variant_ids.filtered(
lambda p: p.type == "product"
):
variant.abc_classification_profile_id = (
categ.abc_classification_profile_id
)
| 39.153846 | 1,018 |
2,689 |
py
|
PYTHON
|
15.0
|
# Copyright 2020 ForgeFlow
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
class ProductTemplate(models.Model):
_inherit = "product.template"
abc_classification_profile_id = fields.Many2one(
"abc.classification.profile",
compute="_compute_abc_classification_profile_id",
inverse="_inverse_abc_classification_profile_id",
store=True,
)
abc_classification_level_id = fields.Many2one(
"abc.classification.profile.level",
compute="_compute_abc_classification_level_id",
inverse="_inverse_abc_classification_level_id",
store=True,
)
@api.depends(
"product_variant_ids", "product_variant_ids.abc_classification_profile_id"
)
def _compute_abc_classification_profile_id(self):
unique_variants = self.filtered(
lambda template: len(template.product_variant_ids) == 1
)
for template in unique_variants:
template.abc_classification_profile_id = (
template.product_variant_ids.abc_classification_profile_id
)
for template in self - unique_variants:
template.abc_classification_profile_id = False
@api.depends(
"product_variant_ids", "product_variant_ids.abc_classification_level_id"
)
def _compute_abc_classification_level_id(self):
unique_variants = self.filtered(
lambda template: len(template.product_variant_ids) == 1
)
for template in unique_variants:
template.abc_classification_level_id = (
template.product_variant_ids.abc_classification_level_id
)
for template in self - unique_variants:
template.abc_classification_level_id = False
def _inverse_abc_classification_profile_id(self):
for template in self:
if len(template.product_variant_ids) == 1:
template.product_variant_ids.abc_classification_profile_id = (
template.abc_classification_profile_id
)
def _inverse_abc_classification_level_id(self):
for template in self:
if len(template.product_variant_ids) == 1:
template.product_variant_ids.abc_classification_level_id = (
template.abc_classification_level_id
)
class ProductProduct(models.Model):
_inherit = "product.product"
abc_classification_profile_id = fields.Many2one(
"abc.classification.profile", index=True
)
abc_classification_level_id = fields.Many2one(
"abc.classification.profile.level", index=True
)
| 36.337838 | 2,689 |
9,806 |
py
|
PYTHON
|
15.0
|
# Copyright 2020 ForgeFlow
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from datetime import timedelta
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
from odoo.tools import float_is_zero
class ABCClassificationProfile(models.Model):
_name = "abc.classification.profile"
_description = "ABC Classification Profile"
name = fields.Char()
level_ids = fields.One2many(
comodel_name="abc.classification.profile.level", inverse_name="profile_id"
)
classification_type = fields.Selection(
selection=[("percentage", "Percentage"), ("fixed", "Fixed")],
default="percentage",
required=True,
)
representation = fields.Char(compute="_compute_representation")
data_source = fields.Selection(
selection=[("stock_moves", "Stock Moves")],
default="stock_moves",
index=True,
required=True,
)
value_criteria = fields.Selection(
selection=[
("consumption_value", "Consumption Value"),
("sales_revenue", "Sales Revenue"),
("sales_volume", "Sales Volume"),
# others: 'profitability', ...
],
default="consumption_value",
string="Value",
index=True,
required=True,
)
past_period = fields.Integer(
default=365, string="Past demand period (Days)", required=True
)
days_to_ignore = fields.Integer(string="Ignore newer than these days")
product_variant_ids = fields.One2many(
"product.product", inverse_name="abc_classification_profile_id"
)
product_count = fields.Integer(compute="_compute_product_count", readonly=True)
company_id = fields.Many2one(comodel_name="res.company", string="Company")
@api.constrains("past_period", "days_to_ignore")
def _check_period(self):
for profile in self:
if profile.days_to_ignore > profile.past_period:
raise ValidationError(
_("The days to ignore can not be greater than the past period.")
)
@api.depends("level_ids")
def _compute_representation(self):
def _get_sort_key_percentage(rec):
return rec.percentage
for profile in self:
levels = profile.level_ids.sorted(key=_get_sort_key_percentage, reverse=True)
profile.representation = "/".join(
[str(x) for x in levels.mapped("display_name")]
)
@api.constrains("level_ids")
def _check_levels(self):
for profile in self:
if profile.classification_type == "percentage":
percentages = profile.level_ids.mapped("percentage")
total = sum(percentages)
if profile.level_ids and total != 100.0:
raise ValidationError(
_("The sum of the percentages of the levels should be 100.")
)
@api.depends("product_variant_ids")
def _compute_product_count(self):
for profile in self:
profile.product_count = len(profile.product_variant_ids)
def action_view_products(self):
products = self.mapped("product_variant_ids")
action = self.env["ir.actions.act_window"].for_xml_id(
"product", "product_variant_action"
)
del action["context"]
if len(products) > 1:
action["domain"] = [("id", "in", products.ids)]
elif len(products) == 1:
form_view = [
(self.env.ref("product.product_variant_easy_edit_view").id, "form")
]
if "views" in action:
action["views"] = form_view + [
(state, view) for state, view in action["views"] if view != "form"
]
else:
action["views"] = form_view
action["res_id"] = products.id
else:
action = {"type": "ir.actions.act_window_close"}
return action
def _fill_initial_product_data(self, date, date_end=False):
product_list = []
if self.data_source == "stock_moves":
return self._fill_data_from_stock_moves(
date, product_list, date_end=date_end
)
else:
return product_list
def _fill_data_from_stock_moves(self, date, product_list, date_end=False):
self.ensure_one()
domain = [
("state", "=", "done"),
("date", ">=", date),
("location_dest_id.usage", "=", "customer"),
("location_id.usage", "!=", "customer"),
("product_id.type", "=", "product"),
"|",
("product_id.abc_classification_profile_id", "=", self.id),
"|",
("product_id.categ_id.abc_classification_profile_id", "=", self.id),
(
"product_id.categ_id.parent_id.abc_classification_profile_id",
"=",
self.id,
),
]
if date_end:
domain.append(("date", "<=", date_end))
moves = (
self.env["stock.move"]
.sudo()
.read_group(
domain,
["product_id", "product_qty"],
["product_id"],
)
)
for move in moves:
product_data = {
"product": self.env["product.product"].browse(move["product_id"][0]),
"units_sold": move["product_qty"],
}
product_list.append(product_data)
return product_list
def _get_inventory_product_value(self, data):
self.ensure_one()
if self.value_criteria == "consumption_value":
return data["unit_cost"] * data["units_sold"]
elif self.value_criteria == "sales_revenue":
return data["unit_price"] * data["units_sold"]
elif self.value_criteria == "sales_volume":
return data["units_sold"]
return 0.0
@api.model
def _get_sort_key_percentage(self, rec):
return rec.percentage
@api.model
def _get_sort_key_fixed(self, rec):
return rec.fixed
@api.model
def _compute_abc_classification(self):
def _get_sort_key_value(data):
return data["value"]
profiles = self.search([]).filtered(lambda p: p.level_ids)
for profile in profiles:
oldest_date = fields.Datetime.to_string(
fields.Datetime.today() - timedelta(days=profile.past_period)
)
final_date = fields.Datetime.to_string(
fields.Datetime.today() - timedelta(days=profile.days_to_ignore)
)
totals = {
"units_sold": 0,
"value": 0.0,
}
product_list = profile._fill_initial_product_data(oldest_date, final_date)
for product_data in product_list:
product_data["unit_cost"] = product_data["product"].standard_price
product_data["unit_price"] = product_data["product"].list_price
totals["units_sold"] += product_data["units_sold"]
product_data["value"] = profile._get_inventory_product_value(
product_data
)
totals["value"] += product_data["value"]
product_list.sort(reverse=True, key=_get_sort_key_value)
levels = profile.level_ids.sorted(
key=getattr(self, "_get_sort_key_%s" % profile.classification_type),
reverse=True,
)
if profile.classification_type == "percentage":
level_percentage = [[level, level.percentage] for level in levels]
current_value = 0
accumulated_percentage = level_percentage[0][1]
for product_data in product_list:
# Accumulated current value
current_value += product_data["value"] or 0.0
# This comparison is equivalent to:
# current_value * 100 / totals["value"] > accumulated_percentage,
# but it is written this way to avoid division and precision loss
# (see the illustrative sketch after this method).
while (
current_value * 100 > accumulated_percentage * totals["value"]
and len(level_percentage) > 1
):
level_percentage.pop(0)
accumulated_percentage += level_percentage[0][1]
product_data[
"product"
].abc_classification_level_id = level_percentage[0][0]
elif profile.classification_type == "fixed":
if product_list:
zero_level = profile.level_ids.filtered(
lambda l: float_is_zero(l.fixed, precision_digits=2)
)
self.env["product.product"].search(
[("abc_classification_profile_id", "=", profile.id)]
).write({"abc_classification_level_id": zero_level.id})
current_value = 0
for product_data in product_list:
level_fixed = [[level, level.fixed] for level in levels]
fixed_value = level_fixed[0][1]
current_value = product_data["value"] or 0.0
while current_value < fixed_value and len(level_fixed) > 1:
level_fixed.pop(0)
fixed_value = level_fixed[0][1]
product_data[
"product"
].abc_classification_level_id = level_fixed[0][0]
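# Illustrative sketch (not part of the module): how the percentage branch
# above assigns levels. The numbers below are made-up; levels are 80% / 15%
# / 5% and the product values are already sorted descending.
def _abc_percentage_example():
    values = [600.0, 200.0, 90.0, 60.0, 30.0, 20.0]
    level_names = ["A", "B", "C"]
    percentages = [80.0, 15.0, 5.0]
    total = sum(values)  # 1000.0
    assigned = []
    current_value = 0.0
    accumulated = percentages[0]
    idx = 0
    for value in values:
        current_value += value
        # same division-free comparison as in _compute_abc_classification
        while current_value * 100 > accumulated * total and idx < len(level_names) - 1:
            idx += 1
            accumulated += percentages[idx]
        assigned.append(level_names[idx])
    assert assigned == ["A", "A", "B", "B", "C", "C"]
    return assigned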
| 40.520661 | 9,806 |
2,820 |
py
|
PYTHON
|
15.0
|
# Copyright 2020 ForgeFlow
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class ABCClassificationProfileLevel(models.Model):
_name = "abc.classification.profile.level"
_description = "ABC Classification Profile Level"
_order = "sequence desc, id desc"
classification_type = fields.Selection(
related="profile_id.classification_type", string="Classification Type"
)
percentage = fields.Float(default=0.0, string="%")
fixed = fields.Monetary(default=0.0, string="Fixed Value")
company_id = fields.Many2one(
comodel_name="res.company",
string="Company",
related="profile_id.company_id",
store=True,
)
currency_id = fields.Many2one(
"res.currency", string="Currency", store=True, related="company_id.currency_id"
)
profile_id = fields.Many2one("abc.classification.profile")
sequence = fields.Integer(compute="_compute_sequence", store=True)
symbol = fields.Char(compute="_compute_symbol", store=True)
def name_get(self):
res = []
for profile in self.mapped("profile_id"):
classification_type = profile.classification_type
for i, level in enumerate(
profile.level_ids.sorted(
key=getattr(
profile, "_get_sort_key_%s" % profile.classification_type
),
reverse=True,
)
):
name = "{} ({} {})".format(
chr(65 + i), level[classification_type], level.symbol
)
res += [(level.id, name)]
return res
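    # Example (assumption): with percentage levels 80 / 15 / 5, name_get()
    # above labels them "A (80.0 %)", "B (15.0 %)" and "C (5.0 %)"; with a
    # fixed classification the fixed value and the currency symbol are shown
    # instead.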
@api.constrains("percentage")
def _check_percentage(self):
for level in self:
if level.classification_type != "percentage":
continue
if level.percentage > 100.0:
raise ValidationError(_("The percentage cannot be greater than 100."))
elif level.percentage <= 0.0:
raise ValidationError(_("The percentage should be a positive number."))
@api.depends("percentage", "fixed", "classification_type")
def _compute_sequence(self):
for level in self:
if level.classification_type == "percentage":
level.sequence = level.percentage
elif level.classification_type == "fixed":
level.sequence = level.fixed
@api.depends("classification_type")
def _compute_symbol(self):
for level in self:
if level.classification_type == "percentage":
level.symbol = "%"
elif level.classification_type == "fixed":
level.symbol = level.currency_id.symbol
| 38.630137 | 2,820 |
457 |
py
|
PYTHON
|
15.0
|
# Copyright 2021 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Product Pricelist Per Contact",
"version": "15.0.1.0.0",
"category": "Product",
"website": "https://github.com/OCA/product-attribute",
"author": "Tecnativa, Odoo Community Association (OCA)",
"license": "AGPL-3",
"installable": True,
"depends": ["product"],
"data": ["views/res_partner_views.xml"],
}
| 32.642857 | 457 |
602 |
py
|
PYTHON
|
15.0
|
# Copyright 2021 Camptocamp SA
# @author Iván Todorovich <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Inventory Product Template Tags",
"summary": "Show product tags menu in Inventory app",
"version": "15.0.1.0.0",
"license": "AGPL-3",
"author": "Camptocamp SA, Odoo Community Association (OCA)",
"website": "https://github.com/OCA/product-attribute",
"depends": ["product_template_tags", "stock"],
"data": ["views/product_template_tag.xml"],
"maintainers": ["ivantodorovich"],
"auto_install": True,
}
| 37.5625 | 601 |
482 |
py
|
PYTHON
|
15.0
|
# Copyright 2019 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
{
"name": "Scheduled Actions as Queue Jobs",
"version": "15.0.1.0.0",
"author": "ACSONE SA/NV,Odoo Community Association (OCA)",
"website": "https://github.com/OCA/queue",
"license": "AGPL-3",
"category": "Generic Modules",
"depends": ["queue_job"],
"data": ["data/data.xml", "views/ir_cron_view.xml"],
"installable": True,
}
| 34.428571 | 482 |
1,685 |
py
|
PYTHON
|
15.0
|
# Copyright 2019 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import TransactionCase
class TestQueueJobCron(TransactionCase):
def setUp(self):
super().setUp()
def test_queue_job_cron(self):
QueueJob = self.env["queue.job"]
default_channel = self.env.ref("queue_job_cron.channel_root_ir_cron")
cron = self.env.ref("queue_job.ir_cron_autovacuum_queue_jobs")
self.assertFalse(cron.run_as_queue_job)
cron.method_direct_trigger()
nb_jobs = QueueJob.search_count([("name", "=", cron.name)])
self.assertEqual(nb_jobs, 0)
cron.write({"run_as_queue_job": True, "channel_id": default_channel.id})
cron.method_direct_trigger()
qjob = QueueJob.search([("name", "=", cron.name)])
self.assertTrue(qjob)
self.assertEqual(qjob.name, cron.name)
self.assertEqual(qjob.priority, cron.priority)
self.assertEqual(qjob.user_id, cron.user_id)
self.assertEqual(qjob.channel, cron.channel_id.complete_name)
def test_queue_job_cron_depends(self):
cron = self.env.ref("queue_job.ir_cron_autovacuum_queue_jobs")
default_channel = self.env.ref("queue_job_cron.channel_root_ir_cron")
self.assertFalse(cron.run_as_queue_job)
cron.write({"run_as_queue_job": True})
self.assertEqual(cron.channel_id.id, default_channel.id)
def test_queue_job_cron_run(self):
cron = self.env.ref("queue_job.ir_cron_autovacuum_queue_jobs")
IrCron = self.env["ir.cron"]
IrCron._run_job_as_queue_job(server_action=cron.ir_actions_server_id)
| 41.097561 | 1,685 |
2,177 |
py
|
PYTHON
|
15.0
|
# Copyright 2019 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
import logging
from odoo import api, fields, models
_logger = logging.getLogger(__name__)
class IrCron(models.Model):
_inherit = "ir.cron"
run_as_queue_job = fields.Boolean(
help="Specify if this cron should be run as a queue job"
)
channel_id = fields.Many2one(
comodel_name="queue.job.channel",
compute="_compute_run_as_queue_job",
readonly=False,
string="Channel",
)
@api.depends("run_as_queue_job")
def _compute_run_as_queue_job(self):
for cron in self:
if cron.run_as_queue_job and not cron.channel_id:
cron.channel_id = self.env.ref("queue_job_cron.channel_root_ir_cron").id
else:
cron.channel_id = False
def _run_job_as_queue_job(self, server_action):
return server_action.run()
def method_direct_trigger(self):
for cron in self:
if not cron.run_as_queue_job:
super(IrCron, cron).method_direct_trigger()
else:
_cron = cron.with_user(cron.user_id).with_context(
lastcall=cron.lastcall
)
_cron.with_delay(
priority=_cron.priority,
description=_cron.name,
channel=_cron.channel_id.complete_name,
)._run_job_as_queue_job(server_action=_cron.ir_actions_server_id)
return True
def _callback(self, cron_name, server_action_id, job_id):
cron = self.env["ir.cron"].sudo().browse(job_id)
if cron.run_as_queue_job:
server_action = self.env["ir.actions.server"].browse(server_action_id)
return self.with_delay(
priority=cron.priority,
description=cron.name,
channel=cron.channel_id.complete_name,
)._run_job_as_queue_job(server_action=server_action)
else:
return super()._callback(
cron_name=cron_name, server_action_id=server_action_id, job_id=job_id
)
| 35.688525 | 2,177 |
1,167 |
py
|
PYTHON
|
15.0
|
# Copyright 2012-2016 Camptocamp
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
class BaseQueueJobError(Exception):
"""Base queue job error"""
class JobError(BaseQueueJobError):
"""A job had an error"""
class NoSuchJobError(JobError):
"""The job does not exist."""
class FailedJobError(JobError):
"""A job had an error that needs to be resolved."""
class RetryableJobError(JobError):
"""A job had an error but can be retried.
The job will be retried after the given number of seconds. If seconds is
empty, it will be retried according to the ``retry_pattern`` of the job or
by :const:`odoo.addons.queue_job.job.RETRY_INTERVAL` if nothing is defined.
If ``ignore_retry`` is True, the retry counter will not be increased.
"""
def __init__(self, msg, seconds=None, ignore_retry=False):
super().__init__(msg)
self.seconds = seconds
self.ignore_retry = ignore_retry
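# Minimal sketch (not part of this module): a job method can postpone itself
# without being counted as failed by raising RetryableJobError. The helper
# below and the 60 second delay are illustrative assumptions only.
def _example_job_method(records):
    def _service_is_up():
        return False  # placeholder for a real availability check
    if not _service_is_up():
        raise RetryableJobError("External service unavailable", seconds=60)
    return "synchronized %s records" % len(records)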
# TODO: remove support of NothingToDo: too dangerous
class NothingToDoJob(JobError):
"""The Job has nothing to do."""
class ChannelNotFound(BaseQueueJobError):
"""A channel could not be found"""
| 27.139535 | 1,167 |
4,316 |
py
|
PYTHON
|
15.0
|
# copyright 2016 Camptocamp
# license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
import json
from datetime import date, datetime
import dateutil
import lxml
from odoo import fields, models
from odoo.tools.func import lazy
class JobSerialized(fields.Field):
"""Provide the storage for job fields stored as json
A base_type must be set; it must be dict, list, tuple or models.BaseModel.
When the field is not set, the json will be the corresponding
default json string (e.g. "{}" or "[]").
Support for some custom types has been added to the json decoder/encoder
(see JobEncoder and JobDecoder, and the round-trip sketch at the end of this file).
"""
type = "job_serialized"
column_type = ("text", "text")
_base_type = None
# these are the default values when we convert an empty value
_default_json_mapping = {
dict: "{}",
list: "[]",
tuple: "[]",
models.BaseModel: lambda env: json.dumps(
{"_type": "odoo_recordset", "model": "base", "ids": [], "uid": env.uid}
),
}
def __init__(self, string=fields.Default, base_type=fields.Default, **kwargs):
super().__init__(string=string, _base_type=base_type, **kwargs)
def _setup_attrs(self, model, name): # pylint: disable=missing-return
super()._setup_attrs(model, name)
if self._base_type not in self._default_json_mapping:
raise ValueError("%s is not a supported base type" % (self._base_type))
def _base_type_default_json(self, env):
default_json = self._default_json_mapping.get(self._base_type)
if not isinstance(default_json, str):
default_json = default_json(env)
return default_json
def convert_to_column(self, value, record, values=None, validate=True):
return self.convert_to_cache(value, record, validate=validate)
def convert_to_cache(self, value, record, validate=True):
# cache format: json.dumps(value) or None
if isinstance(value, self._base_type):
return json.dumps(value, cls=JobEncoder)
else:
return value or None
def convert_to_record(self, value, record):
default = self._base_type_default_json(record.env)
return json.loads(value or default, cls=JobDecoder, env=record.env)
class JobEncoder(json.JSONEncoder):
"""Encode Odoo recordsets so that we can later recompose them"""
def _get_record_context(self, obj):
return obj._job_prepare_context_before_enqueue()
def default(self, obj):
if isinstance(obj, models.BaseModel):
return {
"_type": "odoo_recordset",
"model": obj._name,
"ids": obj.ids,
"uid": obj.env.uid,
"su": obj.env.su,
"context": self._get_record_context(obj),
}
elif isinstance(obj, datetime):
return {"_type": "datetime_isoformat", "value": obj.isoformat()}
elif isinstance(obj, date):
return {"_type": "date_isoformat", "value": obj.isoformat()}
elif isinstance(obj, lxml.etree._Element):
return {
"_type": "etree_element",
"value": lxml.etree.tostring(obj, encoding=str),
}
elif isinstance(obj, lazy):
return obj._value
return json.JSONEncoder.default(self, obj)
class JobDecoder(json.JSONDecoder):
"""Decode json, recomposing recordsets"""
def __init__(self, *args, **kwargs):
env = kwargs.pop("env")
super().__init__(object_hook=self.object_hook, *args, **kwargs)
assert env
self.env = env
def object_hook(self, obj):
if "_type" not in obj:
return obj
type_ = obj["_type"]
if type_ == "odoo_recordset":
model = self.env(user=obj.get("uid"), su=obj.get("su"))[obj["model"]]
if obj.get("context"):
model = model.with_context(**obj.get("context"))
return model.browse(obj["ids"])
elif type_ == "datetime_isoformat":
return dateutil.parser.parse(obj["value"])
elif type_ == "date_isoformat":
return dateutil.parser.parse(obj["value"]).date()
elif type_ == "etree_element":
return lxml.etree.fromstring(obj["value"])
return obj
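# Minimal round-trip sketch (assumption: an Odoo ``env`` is available, e.g.
# in a test or an odoo shell, with queue_job installed). Recordsets and
# datetimes survive json.dumps/json.loads thanks to JobEncoder/JobDecoder.
def _roundtrip_example(env):
    payload = {
        "when": datetime(2022, 1, 1, 12, 0),
        "partners": env["res.partner"].browse(),
    }
    raw = json.dumps(payload, cls=JobEncoder)
    back = json.loads(raw, cls=JobDecoder, env=env)
    assert back["when"] == payload["when"]
    assert back["partners"]._name == "res.partner"
    return back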
| 35.089431 | 4,316 |
1,087 |
py
|
PYTHON
|
15.0
|
# Copyright 2020 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import logging
logger = logging.getLogger(__name__)
def post_init_hook(cr, registry):
# this is the trigger that sends notifications when jobs change
logger.info("Create queue_job_notify trigger")
cr.execute(
"""
DROP TRIGGER IF EXISTS queue_job_notify ON queue_job;
CREATE OR REPLACE
FUNCTION queue_job_notify() RETURNS trigger AS $$
BEGIN
IF TG_OP = 'DELETE' THEN
IF OLD.state != 'done' THEN
PERFORM pg_notify('queue_job', OLD.uuid);
END IF;
ELSE
PERFORM pg_notify('queue_job', NEW.uuid);
END IF;
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER queue_job_notify
AFTER INSERT OR UPDATE OR DELETE
ON queue_job
FOR EACH ROW EXECUTE PROCEDURE queue_job_notify();
"""
)
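# Sketch (assumption, not part of the hook): this is roughly how a consumer
# such as the job runner listens for those notifications with a plain
# psycopg2 connection; ``dsn`` is a standard PostgreSQL connection string.
def _listen_for_job_changes(dsn):
    import select
    import psycopg2
    import psycopg2.extensions
    conn = psycopg2.connect(dsn)
    conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    with conn.cursor() as cr:
        cr.execute("LISTEN queue_job")
    while True:
        # wait up to 60s for the trigger above to fire pg_notify()
        if select.select([conn], [], [], 60.0) == ([], [], []):
            continue
        conn.poll()
        while conn.notifies:
            notification = conn.notifies.pop()
            logger.info("queue_job notification, uuid: %s", notification.payload)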
| 32.939394 | 1,087 |
27,668 |
py
|
PYTHON
|
15.0
|
# Copyright 2013-2020 Camptocamp
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
import hashlib
import inspect
import logging
import os
import sys
import uuid
import weakref
from datetime import datetime, timedelta
from functools import total_ordering
from random import randint
import odoo
from .exception import FailedJobError, NoSuchJobError, RetryableJobError
WAIT_DEPENDENCIES = "wait_dependencies"
PENDING = "pending"
ENQUEUED = "enqueued"
CANCELLED = "cancelled"
DONE = "done"
STARTED = "started"
FAILED = "failed"
STATES = [
(WAIT_DEPENDENCIES, "Wait Dependencies"),
(PENDING, "Pending"),
(ENQUEUED, "Enqueued"),
(STARTED, "Started"),
(DONE, "Done"),
(CANCELLED, "Cancelled"),
(FAILED, "Failed"),
]
DEFAULT_PRIORITY = 10 # used by the PriorityQueue to sort the jobs
DEFAULT_MAX_RETRIES = 5
RETRY_INTERVAL = 10 * 60 # seconds
_logger = logging.getLogger(__name__)
# TODO remove in 15.0 or 16.0, used to keep compatibility as the
# class has been moved to the 'delay' module.
def DelayableRecordset(*args, **kwargs):
# prevent circular import
from .delay import DelayableRecordset as dr
_logger.debug(
"DelayableRecordset moved from the queue_job.job"
" to the queue_job.delay python module"
)
return dr(*args, **kwargs)
def identity_exact(job_):
"""Identity function using the model, method and all arguments as key
When used, this identity key has the effect that when a job is about to be
created and a pending job with the exact same recordset and arguments
already exists, the second one is not created.
It should be used with the ``identity_key`` argument:
.. python::
from odoo.addons.queue_job.job import identity_exact
# [...]
delayable = self.with_delay(identity_key=identity_exact)
delayable.export_record(force=True)
Alternative identity keys can be built using the various fields of the job.
For example, you could compute a hash using only some arguments of
the job.
.. python::
def identity_example(job_):
hasher = hashlib.sha1()
hasher.update(job_.model_name.encode("utf-8"))
hasher.update(job_.method_name.encode("utf-8"))
hasher.update(str(sorted(job_.recordset.ids)).encode("utf-8"))
hasher.update(str(job_.args[1]).encode("utf-8"))
hasher.update(str(job_.kwargs.get('foo', '')).encode("utf-8"))
return hasher.hexdigest()
Usually you will probably always want to include at least the name of the
model and method.
"""
hasher = hashlib.sha1()
hasher.update(job_.model_name.encode("utf-8"))
hasher.update(job_.method_name.encode("utf-8"))
hasher.update(str(sorted(job_.recordset.ids)).encode("utf-8"))
hasher.update(str(job_.args).encode("utf-8"))
hasher.update(str(sorted(job_.kwargs.items())).encode("utf-8"))
return hasher.hexdigest()
@total_ordering
class Job(object):
"""A Job is a task to execute. It is the in-memory representation of a job.
Jobs are stored in the ``queue.job`` Odoo Model, but they are handled
through this class.
.. attribute:: uuid
Id (UUID) of the job.
.. attribute:: graph_uuid
Shared UUID of the job's graph. Empty if the job is a single job.
.. attribute:: state
State of the job; it can be pending, enqueued, started, done or failed.
The start state is pending and the final state is done.
.. attribute:: retry
The current try, starts at 0 and each time the job is executed,
it increases by 1.
.. attribute:: max_retries
The maximum number of retries allowed before the job is
considered as failed.
.. attribute:: args
Arguments passed to the function when executed.
.. attribute:: kwargs
Keyword arguments passed to the function when executed.
.. attribute:: description
Human description of the job.
.. attribute:: func
The python function itself.
.. attribute:: model_name
Odoo model on which the job will run.
.. attribute:: priority
Priority of the job, 0 being the higher priority.
.. attribute:: date_created
Date and time when the job was created.
.. attribute:: date_enqueued
Date and time when the job was enqueued.
.. attribute:: date_started
Date and time when the job was started.
.. attribute:: date_done
Date and time when the job was done.
.. attribute:: result
A description of the result (for humans).
.. attribute:: exc_name
Exception error name when the job failed.
.. attribute:: exc_message
Exception error message when the job failed.
.. attribute:: exc_info
Exception information (traceback) when the job failed.
.. attribute:: user_id
Odoo user id which created the job
.. attribute:: eta
Estimated Time of Arrival of the job. It will not be executed
before this date/time.
.. attribute:: recordset
Model recordset when we are on a delayed Model method
.. attribute:: channel
The complete name of the channel to use to process the job. If
provided it overrides the one defined on the job's function.
.. attribute:: identity_key
A key referencing the job; multiple jobs with the same key will not
be added to a channel if the existing job with the same key is not yet
started or executed.
"""
@classmethod
def load(cls, env, job_uuid):
"""Read a single job from the Database
Raise an error if the job is not found.
"""
stored = cls.db_records_from_uuids(env, [job_uuid])
if not stored:
raise NoSuchJobError(
"Job %s no longer exists in the storage." % job_uuid
)
return cls._load_from_db_record(stored)
@classmethod
def load_many(cls, env, job_uuids):
"""Read jobs in batch from the Database
Jobs not found are ignored.
"""
recordset = cls.db_records_from_uuids(env, job_uuids)
return {cls._load_from_db_record(record) for record in recordset}
@classmethod
def _load_from_db_record(cls, job_db_record):
stored = job_db_record
args = stored.args
kwargs = stored.kwargs
method_name = stored.method_name
recordset = stored.records
method = getattr(recordset, method_name)
eta = None
if stored.eta:
eta = stored.eta
job_ = cls(
method,
args=args,
kwargs=kwargs,
priority=stored.priority,
eta=eta,
job_uuid=stored.uuid,
description=stored.name,
channel=stored.channel,
identity_key=stored.identity_key,
)
if stored.date_created:
job_.date_created = stored.date_created
if stored.date_enqueued:
job_.date_enqueued = stored.date_enqueued
if stored.date_started:
job_.date_started = stored.date_started
if stored.date_done:
job_.date_done = stored.date_done
if stored.date_cancelled:
job_.date_cancelled = stored.date_cancelled
job_.state = stored.state
job_.graph_uuid = stored.graph_uuid if stored.graph_uuid else None
job_.result = stored.result if stored.result else None
job_.exc_info = stored.exc_info if stored.exc_info else None
job_.retry = stored.retry
job_.max_retries = stored.max_retries
if stored.company_id:
job_.company_id = stored.company_id.id
job_.identity_key = stored.identity_key
job_.worker_pid = stored.worker_pid
job_.__depends_on_uuids.update(stored.dependencies.get("depends_on", []))
job_.__reverse_depends_on_uuids.update(
stored.dependencies.get("reverse_depends_on", [])
)
return job_
def job_record_with_same_identity_key(self):
"""Check if a job to be executed with the same key exists."""
existing = (
self.env["queue.job"]
.sudo()
.search(
[
("identity_key", "=", self.identity_key),
("state", "in", [PENDING, ENQUEUED]),
],
limit=1,
)
)
return existing
# TODO to deprecate (not called anymore)
@classmethod
def enqueue(
cls,
func,
args=None,
kwargs=None,
priority=None,
eta=None,
max_retries=None,
description=None,
channel=None,
identity_key=None,
):
"""Create a Job and enqueue it in the queue. Return the job.
This expects the arguments specific to the job to be already extracted
from the ones to pass to the job function.
If the identity key is the same as the one in a pending job,
no job is created and the existing job is returned.
"""
new_job = cls(
func=func,
args=args,
kwargs=kwargs,
priority=priority,
eta=eta,
max_retries=max_retries,
description=description,
channel=channel,
identity_key=identity_key,
)
return new_job._enqueue_job()
# TODO to deprecate (not called anymore)
def _enqueue_job(self):
if self.identity_key:
existing = self.job_record_with_same_identity_key()
if existing:
_logger.debug(
"a job has not been enqueued due to having "
"the same identity key (%s) than job %s",
self.identity_key,
existing.uuid,
)
return Job._load_from_db_record(existing)
self.store()
_logger.debug(
"enqueued %s:%s(*%r, **%r) with uuid: %s",
self.recordset,
self.method_name,
self.args,
self.kwargs,
self.uuid,
)
return self
@staticmethod
def db_record_from_uuid(env, job_uuid):
# TODO remove in 15.0 or 16.0
_logger.debug("deprecated, use 'db_records_from_uuids'")
return Job.db_records_from_uuids(env, [job_uuid])
@staticmethod
def db_records_from_uuids(env, job_uuids):
model = env["queue.job"].sudo()
record = model.search([("uuid", "in", tuple(job_uuids))])
return record.with_env(env).sudo()
def __init__(
self,
func,
args=None,
kwargs=None,
priority=None,
eta=None,
job_uuid=None,
max_retries=None,
description=None,
channel=None,
identity_key=None,
):
"""Create a Job
:param func: function to execute
:type func: function
:param args: arguments for func
:type args: tuple
:param kwargs: keyword arguments for func
:type kwargs: dict
:param priority: priority of the job,
the smaller the value, the higher the priority
:type priority: int
:param eta: the job can be executed only after this datetime
(or now + timedelta)
:type eta: datetime or timedelta
:param job_uuid: UUID of the job
:param max_retries: maximum number of retries before giving up and setting
the job state to 'failed'. A value of 0 means infinite retries.
:param description: human description of the job. If None, description
is computed from the function doc or name
:param channel: The complete channel name to use to process the job.
:param identity_key: A hash to uniquely identify a job, or a function
that returns this hash (the function takes the job
as argument)
"""
if args is None:
args = ()
if isinstance(args, list):
args = tuple(args)
assert isinstance(args, tuple), "%s: args are not a tuple" % args
if kwargs is None:
kwargs = {}
assert isinstance(kwargs, dict), "%s: kwargs are not a dict" % kwargs
if not _is_model_method(func):
raise TypeError("Job accepts only methods of Models")
recordset = func.__self__
env = recordset.env
self.method_name = func.__name__
self.recordset = recordset
self.env = env
self.job_model = self.env["queue.job"]
self.job_model_name = "queue.job"
self.job_config = (
self.env["queue.job.function"].sudo().job_config(self.job_function_name)
)
self.state = PENDING
self.retry = 0
if max_retries is None:
self.max_retries = DEFAULT_MAX_RETRIES
else:
self.max_retries = max_retries
self._uuid = job_uuid
self.graph_uuid = None
self.args = args
self.kwargs = kwargs
self.__depends_on_uuids = set()
self.__reverse_depends_on_uuids = set()
self._depends_on = set()
self._reverse_depends_on = weakref.WeakSet()
self.priority = priority
if self.priority is None:
self.priority = DEFAULT_PRIORITY
self.date_created = datetime.now()
self._description = description
if isinstance(identity_key, str):
self._identity_key = identity_key
self._identity_key_func = None
else:
# we'll compute the key on the fly when called
# from the function
self._identity_key = None
self._identity_key_func = identity_key
self.date_enqueued = None
self.date_started = None
self.date_done = None
self.date_cancelled = None
self.result = None
self.exc_name = None
self.exc_message = None
self.exc_info = None
if "company_id" in env.context:
company_id = env.context["company_id"]
else:
company_id = env.company.id
self.company_id = company_id
self._eta = None
self.eta = eta
self.channel = channel
self.worker_pid = None
def add_depends(self, jobs):
if self in jobs:
raise ValueError("job cannot depend on itself")
self.__depends_on_uuids |= {j.uuid for j in jobs}
self._depends_on.update(jobs)
for parent in jobs:
parent.__reverse_depends_on_uuids.add(self.uuid)
parent._reverse_depends_on.add(self)
if any(j.state != DONE for j in jobs):
self.state = WAIT_DEPENDENCIES
def perform(self):
"""Execute the job.
The job is executed with the user which has initiated it.
"""
self.retry += 1
try:
self.result = self.func(*tuple(self.args), **self.kwargs)
except RetryableJobError as err:
if err.ignore_retry:
self.retry -= 1
raise
elif not self.max_retries: # infinite retries
raise
elif self.retry >= self.max_retries:
type_, value, traceback = sys.exc_info()
# change the exception type but keep the original
# traceback and message:
# http://blog.ianbicking.org/2007/09/12/re-raising-exceptions/
new_exc = FailedJobError(
"Max. retries (%d) reached: %s" % (self.max_retries, value or type_)
)
raise new_exc from err
raise
return self.result
def enqueue_waiting(self):
sql = """
UPDATE queue_job
SET state = %s
FROM (
SELECT child.id, array_agg(parent.state) as parent_states
FROM queue_job job
JOIN LATERAL
json_array_elements_text(
job.dependencies::json->'reverse_depends_on'
) child_deps ON true
JOIN queue_job child
ON child.graph_uuid = job.graph_uuid
AND child.uuid = child_deps
JOIN LATERAL
json_array_elements_text(
child.dependencies::json->'depends_on'
) parent_deps ON true
JOIN queue_job parent
ON parent.graph_uuid = job.graph_uuid
AND parent.uuid = parent_deps
WHERE job.uuid = %s
GROUP BY child.id
) jobs
WHERE
queue_job.id = jobs.id
AND %s = ALL(jobs.parent_states)
AND state = %s;
"""
self.env.cr.execute(sql, (PENDING, self.uuid, DONE, WAIT_DEPENDENCIES))
self.env["queue.job"].invalidate_cache(["state"])
def store(self):
"""Store the Job"""
job_model = self.env["queue.job"]
# The sentinel is used to prevent editing sensitive fields (such as
# method_name) from RPC methods.
edit_sentinel = job_model.EDIT_SENTINEL
db_record = self.db_record()
if db_record:
db_record.with_context(_job_edit_sentinel=edit_sentinel).write(
self._store_values()
)
else:
job_model.with_context(_job_edit_sentinel=edit_sentinel).sudo().create(
self._store_values(create=True)
)
def _store_values(self, create=False):
vals = {
"state": self.state,
"priority": self.priority,
"retry": self.retry,
"max_retries": self.max_retries,
"exc_name": self.exc_name,
"exc_message": self.exc_message,
"exc_info": self.exc_info,
"company_id": self.company_id,
"result": str(self.result) if self.result else False,
"date_enqueued": False,
"date_started": False,
"date_done": False,
"exec_time": False,
"date_cancelled": False,
"eta": False,
"identity_key": False,
"worker_pid": self.worker_pid,
"graph_uuid": self.graph_uuid,
}
if self.date_enqueued:
vals["date_enqueued"] = self.date_enqueued
if self.date_started:
vals["date_started"] = self.date_started
if self.date_done:
vals["date_done"] = self.date_done
if self.exec_time:
vals["exec_time"] = self.exec_time
if self.date_cancelled:
vals["date_cancelled"] = self.date_cancelled
if self.eta:
vals["eta"] = self.eta
if self.identity_key:
vals["identity_key"] = self.identity_key
dependencies = {
"depends_on": [parent.uuid for parent in self.depends_on],
"reverse_depends_on": [
children.uuid for children in self.reverse_depends_on
],
}
vals["dependencies"] = dependencies
if create:
vals.update(
{
"user_id": self.env.uid,
"channel": self.channel,
# The following values must never be modified after the
# creation of the job
"uuid": self.uuid,
"name": self.description,
"func_string": self.func_string,
"date_created": self.date_created,
"model_name": self.recordset._name,
"method_name": self.method_name,
"job_function_id": self.job_config.job_function_id,
"channel_method_name": self.job_function_name,
"records": self.recordset,
"args": self.args,
"kwargs": self.kwargs,
}
)
vals_from_model = self._store_values_from_model()
# Sanitize values: make sure you cannot screw core values
vals_from_model = {k: v for k, v in vals_from_model.items() if k not in vals}
vals.update(vals_from_model)
return vals
def _store_values_from_model(self):
vals = {}
value_handlers_candidates = (
"_job_store_values_for_" + self.method_name,
"_job_store_values",
)
for candidate in value_handlers_candidates:
handler = getattr(self.recordset, candidate, None)
if handler is not None:
vals = handler(self)
return vals
@property
def func_string(self):
model = repr(self.recordset)
args = [repr(arg) for arg in self.args]
kwargs = ["{}={!r}".format(key, val) for key, val in self.kwargs.items()]
all_args = ", ".join(args + kwargs)
return "{}.{}({})".format(model, self.method_name, all_args)
def __eq__(self, other):
return self.uuid == other.uuid
def __hash__(self):
return self.uuid.__hash__()
def sorting_key(self):
return self.eta, self.priority, self.date_created, self.seq
def __lt__(self, other):
if self.eta and not other.eta:
return True
elif not self.eta and other.eta:
return False
return self.sorting_key() < other.sorting_key()
def db_record(self):
return self.db_records_from_uuids(self.env, [self.uuid])
@property
def func(self):
recordset = self.recordset.with_context(job_uuid=self.uuid)
return getattr(recordset, self.method_name)
@property
def job_function_name(self):
func_model = self.env["queue.job.function"].sudo()
return func_model.job_function_name(self.recordset._name, self.method_name)
@property
def identity_key(self):
if self._identity_key is None:
if self._identity_key_func:
self._identity_key = self._identity_key_func(self)
return self._identity_key
@identity_key.setter
def identity_key(self, value):
if isinstance(value, str):
self._identity_key = value
self._identity_key_func = None
else:
# we'll compute the key on the fly when called
# from the function
self._identity_key = None
self._identity_key_func = value
@property
def depends_on(self):
if not self._depends_on:
self._depends_on = Job.load_many(self.env, self.__depends_on_uuids)
return self._depends_on
@property
def reverse_depends_on(self):
if not self._reverse_depends_on:
self._reverse_depends_on = Job.load_many(
self.env, self.__reverse_depends_on_uuids
)
return set(self._reverse_depends_on)
@property
def description(self):
if self._description:
return self._description
elif self.func.__doc__:
return self.func.__doc__.splitlines()[0].strip()
else:
return "{}.{}".format(self.model_name, self.func.__name__)
@property
def uuid(self):
"""Job ID, this is a UUID"""
if self._uuid is None:
self._uuid = str(uuid.uuid4())
return self._uuid
@property
def model_name(self):
return self.recordset._name
@property
def user_id(self):
return self.recordset.env.uid
@property
def eta(self):
return self._eta
@eta.setter
def eta(self, value):
if not value:
self._eta = None
elif isinstance(value, timedelta):
self._eta = datetime.now() + value
elif isinstance(value, int):
self._eta = datetime.now() + timedelta(seconds=value)
else:
self._eta = value
@property
def channel(self):
return self._channel or self.job_config.channel
@channel.setter
def channel(self, value):
self._channel = value
@property
def exec_time(self):
if self.date_done and self.date_started:
return (self.date_done - self.date_started).total_seconds()
return None
def set_pending(self, result=None, reset_retry=True):
if any(j.state != DONE for j in self.depends_on):
self.state = WAIT_DEPENDENCIES
else:
self.state = PENDING
self.date_enqueued = None
self.date_started = None
self.date_done = None
self.worker_pid = None
self.date_cancelled = None
if reset_retry:
self.retry = 0
if result is not None:
self.result = result
def set_enqueued(self):
self.state = ENQUEUED
self.date_enqueued = datetime.now()
self.date_started = None
self.worker_pid = None
def set_started(self):
self.state = STARTED
self.date_started = datetime.now()
self.worker_pid = os.getpid()
def set_done(self, result=None):
self.state = DONE
self.exc_name = None
self.exc_info = None
self.date_done = datetime.now()
if result is not None:
self.result = result
def set_cancelled(self, result=None):
self.state = CANCELLED
self.date_cancelled = datetime.now()
if result is not None:
self.result = result
def set_failed(self, **kw):
self.state = FAILED
for k, v in kw.items():
if v is not None:
setattr(self, k, v)
def __repr__(self):
return "<Job %s, priority:%d>" % (self.uuid, self.priority)
def _get_retry_seconds(self, seconds=None):
retry_pattern = self.job_config.retry_pattern
if not seconds and retry_pattern:
# ordered from lower to higher count of retries
patt = sorted(retry_pattern.items(), key=lambda t: t[0])
seconds = RETRY_INTERVAL
for retry_count, postpone_seconds in patt:
if self.retry >= retry_count:
seconds = postpone_seconds
else:
break
elif not seconds:
seconds = RETRY_INTERVAL
if isinstance(seconds, (list, tuple)):
seconds = randint(seconds[0], seconds[1])
return seconds
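    # Worked example (assumption): with job_config.retry_pattern set to
    # {1: 10, 5: 60, 10: 600}, the loop above yields 10s for retries 1-4,
    # 60s for retries 5-9 and 600s from retry 10 on; a (min, max) tuple
    # instead of an int yields a random delay within that range.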
def postpone(self, result=None, seconds=None):
"""Postpone the job
Write an estimated time of arrival n seconds
later than now. Used when a retryable exception
wants to retry a job later.
"""
eta_seconds = self._get_retry_seconds(seconds)
self.eta = timedelta(seconds=eta_seconds)
self.exc_name = None
self.exc_info = None
if result is not None:
self.result = result
def related_action(self):
record = self.db_record()
if not self.job_config.related_action_enable:
return None
funcname = self.job_config.related_action_func_name
if not funcname:
funcname = record._default_related_action
if not isinstance(funcname, str):
raise ValueError(
"related_action must be the name of the "
"method on queue.job as string"
)
action = getattr(record, funcname)
action_kwargs = self.job_config.related_action_kwargs
return action(**action_kwargs)
def _is_model_method(func):
return inspect.ismethod(func) and isinstance(
func.__self__.__class__, odoo.models.MetaModel
)
| 30.983203 | 27,668 |
1,334 |
py
|
PYTHON
|
15.0
|
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
{
"name": "Job Queue",
"version": "15.0.2.3.0",
"author": "Camptocamp,ACSONE SA/NV,Odoo Community Association (OCA)",
"website": "https://github.com/OCA/queue",
"license": "LGPL-3",
"category": "Generic Modules",
"depends": ["mail", "base_sparse_field"],
"external_dependencies": {"python": ["requests"]},
"data": [
"security/security.xml",
"security/ir.model.access.csv",
"views/queue_job_views.xml",
"views/queue_job_channel_views.xml",
"views/queue_job_function_views.xml",
"wizards/queue_jobs_to_done_views.xml",
"wizards/queue_jobs_to_cancelled_views.xml",
"wizards/queue_requeue_job_views.xml",
"views/queue_job_menus.xml",
"data/queue_data.xml",
"data/queue_job_function_data.xml",
],
"assets": {
"web.assets_backend": [
"/queue_job/static/lib/vis/vis-network.min.css",
"/queue_job/static/src/scss/queue_job_fields.scss",
"/queue_job/static/lib/vis/vis-network.min.js",
"/queue_job/static/src/js/queue_job_fields.js",
],
},
"installable": True,
"development_status": "Mature",
"maintainers": ["guewen"],
"post_init_hook": "post_init_hook",
}
| 36.054054 | 1,334 |
19,985 |
py
|
PYTHON
|
15.0
|
# Copyright 2019 Camptocamp
# Copyright 2019 Guewen Baconnier
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
import itertools
import logging
import os
import uuid
from collections import defaultdict, deque
from .job import Job
_logger = logging.getLogger(__name__)
def group(*delayables):
"""Return a group of delayable to form a graph
A group means that jobs can be executed concurrently.
A job or a group of jobs depending on a group can be executed only after
all the jobs of the group are done.
Shortcut to :class:`~odoo.addons.queue_job.delay.DelayableGroup`.
Example::
g1 = group(delayable1, delayable2)
g2 = group(delayable3, delayable4)
g1.on_done(g2)
g1.delay()
"""
return DelayableGroup(*delayables)
def chain(*delayables):
"""Return a chain of delayable to form a graph
A chain means that jobs must be executed sequentially.
A job or a group of jobs depending on a chain can be executed only after
the last job of the chain is done.
Shortcut to :class:`~odoo.addons.queue_job.delay.DelayableChain`.
Example::
chain1 = chain(delayable1, delayable2, delayable3)
chain2 = chain(delayable4, delayable5, delayable6)
chain1.on_done(chain2)
chain1.delay()
"""
return DelayableChain(*delayables)
class Graph:
"""Acyclic directed graph holding vertices of any hashable type
This graph is not specifically designed to hold :class:`~Delayable`
instances, although ultimately it is used for this purpose.
"""
__slots__ = "_graph"
def __init__(self, graph=None):
if graph:
self._graph = graph
else:
self._graph = {}
def add_vertex(self, vertex):
"""Add a vertex
Has no effect if called several times with the same vertex
"""
self._graph.setdefault(vertex, set())
def add_edge(self, parent, child):
"""Add an edge between a parent and a child vertex
Has no effect if called several times with the same pair of vertices
"""
self.add_vertex(child)
self._graph.setdefault(parent, set()).add(child)
def vertices(self):
"""Return the vertices (nodes) of the graph"""
return set(self._graph)
def edges(self):
"""Return the edges (links) of the graph"""
links = []
for vertex, neighbours in self._graph.items():
for neighbour in neighbours:
links.append((vertex, neighbour))
return links
# from
# https://codereview.stackexchange.com/questions/55767/finding-all-paths-from-a-given-graph
def paths(self, vertex):
"""Generate the maximal cycle-free paths in graph starting at vertex.
>>> g = Graph({1: {2, 3}, 2: {3, 4}, 3: {1}, 4: set()})
>>> sorted(g.paths(1))
[[1, 2, 3], [1, 2, 4], [1, 3]]
>>> sorted(g.paths(3))
[[3, 1, 2, 4]]
"""
path = [vertex] # path traversed so far
seen = {vertex} # set of vertices in path
def search():
dead_end = True
for neighbour in self._graph[path[-1]]:
if neighbour not in seen:
dead_end = False
seen.add(neighbour)
path.append(neighbour)
yield from search()
path.pop()
seen.remove(neighbour)
if dead_end:
yield list(path)
yield from search()
def topological_sort(self):
"""Yields a proposed order of nodes to respect dependencies
The order is not unique, the result may vary, but it is guaranteed
that a node depending on another is never yielded before that other node.
It assumes the graph has no cycle.
"""
depends_per_node = defaultdict(int)
for __, tail in self.edges():
depends_per_node[tail] += 1
# the queue contains only elements for which all dependencies
# are resolved
queue = deque(self.root_vertices())
while queue:
vertex = queue.popleft()
yield vertex
for node in self._graph[vertex]:
depends_per_node[node] -= 1
if not depends_per_node[node]:
queue.append(node)
def root_vertices(self):
"""Returns the root vertices
meaning they do not depend on any other job.
"""
dependency_vertices = set()
for dependencies in self._graph.values():
dependency_vertices.update(dependencies)
return set(self._graph.keys()) - dependency_vertices
def __repr__(self):
paths = [path for vertex in self.root_vertices() for path in self.paths(vertex)]
lines = []
for path in paths:
lines.append(" → ".join(repr(vertex) for vertex in path))
return "\n".join(lines)
class DelayableGraph(Graph):
"""Directed Graph for :class:`~Delayable` dependencies
It connects together the :class:`~Delayable`, :class:`~DelayableGroup` and
    :class:`~DelayableChain` graphs, then creates and enqueues the jobs.
"""
def _merge_graph(self, graph):
"""Merge a graph in the current graph
It takes each vertex, which can be :class:`~Delayable`,
:class:`~DelayableChain` or :class:`~DelayableGroup`, and updates the
current graph with the edges between Delayable objects (connecting
heads and tails of the groups and chains), so that at the end, the
graph contains only Delayable objects and their links.
"""
for vertex, neighbours in graph._graph.items():
tails = vertex._tail()
for tail in tails:
# connect the tails with the heads of each node
heads = {head for n in neighbours for head in n._head()}
self._graph.setdefault(tail, set()).update(heads)
def _connect_graphs(self):
"""Visit the vertices' graphs and connect them, return the whole graph
Build a new graph, walk the vertices and their related vertices, merge
their graph in the new one, until we have visited all the vertices
"""
graph = DelayableGraph()
graph._merge_graph(self)
seen = set()
visit_stack = deque([self])
while visit_stack:
current = visit_stack.popleft()
if current in seen:
continue
vertices = current.vertices()
for vertex in vertices:
vertex_graph = vertex._graph
graph._merge_graph(vertex_graph)
visit_stack.append(vertex_graph)
seen.add(current)
return graph
def _has_to_execute_directly(self, vertices):
"""Used for tests to run tests directly instead of storing them
In tests, prefer to use
:func:`odoo.addons.queue_job.tests.common.trap_jobs`.
"""
if os.getenv("TEST_QUEUE_JOB_NO_DELAY"):
_logger.warning(
"`TEST_QUEUE_JOB_NO_DELAY` env var found. NO JOB scheduled."
)
return True
envs = {vertex.recordset.env for vertex in vertices}
for env in envs:
if env.context.get("test_queue_job_no_delay"):
_logger.warning(
"`test_queue_job_no_delay` ctx key found. NO JOB scheduled."
)
return True
return False
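    # For instance (illustrative), a legacy-style test can force synchronous
    # execution with:
    #   records.with_context(test_queue_job_no_delay=True).button_do_stuff()
    # where ``button_do_stuff`` is a hypothetical method that delays jobs;
    # ``trap_jobs()`` from ``tests/common.py`` remains the recommended way.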
@staticmethod
def _ensure_same_graph_uuid(jobs):
"""Set the same graph uuid on all jobs of the same graph"""
jobs_count = len(jobs)
if jobs_count == 0:
raise ValueError("Expecting jobs")
elif jobs_count == 1:
if jobs[0].graph_uuid:
raise ValueError(
"Job %s is a single job, it should not"
" have a graph uuid" % (jobs[0],)
)
else:
graph_uuids = {job.graph_uuid for job in jobs if job.graph_uuid}
if len(graph_uuids) > 1:
raise ValueError("Jobs cannot have dependencies between several graphs")
elif len(graph_uuids) == 1:
graph_uuid = graph_uuids.pop()
else:
graph_uuid = str(uuid.uuid4())
for job in jobs:
job.graph_uuid = graph_uuid
def delay(self):
"""Build the whole graph, creates jobs and delay them"""
graph = self._connect_graphs()
vertices = graph.vertices()
for vertex in vertices:
vertex._build_job()
self._ensure_same_graph_uuid([vertex._generated_job for vertex in vertices])
if self._has_to_execute_directly(vertices):
self._execute_graph_direct(graph)
return
for vertex, neighbour in graph.edges():
neighbour._generated_job.add_depends({vertex._generated_job})
# If all the jobs of the graph have another job with the same identity,
# we do not create them. Maybe we should check that the found jobs are
# part of the same graph, but not sure it's really required...
# Also, maybe we want to check only the root jobs.
existing_mapping = {}
for vertex in vertices:
if not vertex.identity_key:
continue
generated_job = vertex._generated_job
existing = generated_job.job_record_with_same_identity_key()
if not existing:
# at least one does not exist yet, we'll delay the whole graph
existing_mapping.clear()
break
existing_mapping[vertex] = existing
# We'll replace the generated jobs by the existing ones, so callers
# can retrieve the existing job in "_generated_job".
# existing_mapping contains something only if *all* the job with an
# identity have an existing one.
for vertex, existing in existing_mapping.items():
vertex._generated_job = existing
return
for vertex in vertices:
vertex._generated_job.store()
def _execute_graph_direct(self, graph):
for delayable in graph.topological_sort():
delayable._execute_direct()
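# Illustrative sketch (not part of the upstream module): how Delayable,
# group() and chain() collapse into a single DelayableGraph when delay()
# is called. ``records`` is a hypothetical Odoo recordset and
# ``export_batch`` / ``notify_done`` are hypothetical job methods.
def _example_delayable_graph(records):
    export = group(
        Delayable(records).export_batch(0),
        Delayable(records).export_batch(1),
    )
    notify = Delayable(records).notify_done()
    # notify runs only once both export jobs are done
    export.on_done(notify)
    # delay() connects the graphs, creates one Job per Delayable, links
    # their dependencies and stores them under a single graph_uuid
    export.delay()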
class DelayableChain:
"""Chain of delayables to form a graph
Delayables can be other :class:`~Delayable`, :class:`~DelayableChain` or
:class:`~DelayableGroup` objects.
A chain means that jobs must be executed sequentially.
    A job or a group of jobs depending on a chain can be executed only after
    the last job of the chain is done.
    Chains can be connected to other Delayable, DelayableChain or
    DelayableGroup objects by using :meth:`~on_done`.
A Chain is enqueued by calling :meth:`~delay`, which delays the whole
graph.
Important: :meth:`~delay` must be called on the top-level
delayable/chain/group object of the graph.
"""
__slots__ = ("_graph", "__head", "__tail")
def __init__(self, *delayables):
self._graph = DelayableGraph()
iter_delayables = iter(delayables)
head = next(iter_delayables)
self.__head = head
self._graph.add_vertex(head)
for neighbour in iter_delayables:
self._graph.add_edge(head, neighbour)
head = neighbour
self.__tail = head
def _head(self):
return self.__head._tail()
def _tail(self):
return self.__tail._head()
def __repr__(self):
inner_graph = "\n\t".join(repr(self._graph).split("\n"))
return "DelayableChain(\n\t{}\n)".format(inner_graph)
def on_done(self, *delayables):
"""Connects the current chain to other delayables/chains/groups
The delayables/chains/groups passed in the parameters will be executed
when the current Chain is done.
"""
for delayable in delayables:
self._graph.add_edge(self.__tail, delayable)
return self
def delay(self):
"""Delay the whole graph"""
self._graph.delay()
class DelayableGroup:
"""Group of delayables to form a graph
Delayables can be other :class:`~Delayable`, :class:`~DelayableChain` or
:class:`~DelayableGroup` objects.
    A group means that jobs can be executed concurrently.
    A job or a group of jobs depending on a group can be executed only after
    all the jobs of the group are done.
    Groups can be connected to other Delayable, DelayableChain or
    DelayableGroup objects by using :meth:`~on_done`.
A group is enqueued by calling :meth:`~delay`, which delays the whole
graph.
Important: :meth:`~delay` must be called on the top-level
delayable/chain/group object of the graph.
"""
__slots__ = ("_graph", "_delayables")
def __init__(self, *delayables):
self._graph = DelayableGraph()
self._delayables = set(delayables)
for delayable in delayables:
self._graph.add_vertex(delayable)
def _head(self):
return itertools.chain.from_iterable(node._head() for node in self._delayables)
def _tail(self):
return itertools.chain.from_iterable(node._tail() for node in self._delayables)
def __repr__(self):
inner_graph = "\n\t".join(repr(self._graph).split("\n"))
return "DelayableGroup(\n\t{}\n)".format(inner_graph)
def on_done(self, *delayables):
"""Connects the current group to other delayables/chains/groups
The delayables/chains/groups passed in the parameters will be executed
when the current Group is done.
"""
for parent in self._delayables:
for child in delayables:
self._graph.add_edge(parent, child)
return self
def delay(self):
"""Delay the whole graph"""
self._graph.delay()
class Delayable:
"""Unit of a graph, one Delayable will lead to an enqueued job
Delayables can have dependencies on each others, as well as dependencies on
:class:`~DelayableGroup` or :class:`~DelayableChain` objects.
This class will generally not be used directly, it is used internally
by :meth:`~odoo.addons.queue_job.models.base.Base.delayable`. Look
in the base model for more details.
Delayables can be connected to other Delayable, DelayableChain or
    DelayableGroup objects by using :meth:`~on_done`.
Properties of the future job can be set using the :meth:`~set` method,
which always return ``self``::
delayable.set(priority=15).set({"max_retries": 5, "eta": 15}).delay()
It can be used for example to set properties dynamically.
A Delayable is enqueued by calling :meth:`delay()`, which delays the whole
graph.
Important: :meth:`delay()` must be called on the top-level
delayable/chain/group object of the graph.
"""
_properties = (
"priority",
"eta",
"max_retries",
"description",
"channel",
"identity_key",
)
__slots__ = _properties + (
"recordset",
"_graph",
"_job_method",
"_job_args",
"_job_kwargs",
"_generated_job",
)
def __init__(
self,
recordset,
priority=None,
eta=None,
max_retries=None,
description=None,
channel=None,
identity_key=None,
):
self._graph = DelayableGraph()
self._graph.add_vertex(self)
self.recordset = recordset
self.priority = priority
self.eta = eta
self.max_retries = max_retries
self.description = description
self.channel = channel
self.identity_key = identity_key
self._job_method = None
self._job_args = ()
self._job_kwargs = {}
self._generated_job = None
def _head(self):
return [self]
def _tail(self):
return [self]
def __repr__(self):
return "Delayable({}.{}({}, {}))".format(
self.recordset,
self._job_method.__name__ if self._job_method else "",
self._job_args,
self._job_kwargs,
)
def __del__(self):
if not self._generated_job:
_logger.warning("Delayable %s was prepared but never delayed", self)
def _set_from_dict(self, properties):
for key, value in properties.items():
if key not in self._properties:
raise ValueError("No property %s" % (key,))
setattr(self, key, value)
def set(self, *args, **kwargs):
"""Set job properties and return self
        The values can be passed as a dictionary and/or as keyword arguments
"""
if args:
# args must be a dict
self._set_from_dict(*args)
self._set_from_dict(kwargs)
return self
def on_done(self, *delayables):
"""Connects the current Delayable to other delayables/chains/groups
The delayables/chains/groups passed in the parameters will be executed
when the current Delayable is done.
"""
for child in delayables:
self._graph.add_edge(self, child)
return self
def delay(self):
"""Delay the whole graph"""
self._graph.delay()
def _build_job(self):
if self._generated_job:
return self._generated_job
self._generated_job = Job(
self._job_method,
args=self._job_args,
kwargs=self._job_kwargs,
priority=self.priority,
max_retries=self.max_retries,
eta=self.eta,
description=self.description,
channel=self.channel,
identity_key=self.identity_key,
)
return self._generated_job
def _store_args(self, *args, **kwargs):
self._job_args = args
self._job_kwargs = kwargs
return self
def __getattr__(self, name):
if name in self.__slots__:
return super().__getattr__(name)
if name in self.recordset:
raise AttributeError(
"only methods can be delayed (%s called on %s)" % (name, self.recordset)
)
recordset_method = getattr(self.recordset, name)
self._job_method = recordset_method
return self._store_args
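    # Example of the capture above (names are illustrative): calling
    # ``Delayable(records).export_batch(42, force=True)`` resolves
    # ``export_batch`` through ``__getattr__``, which keeps the bound method
    # in ``_job_method`` and returns ``_store_args``, which in turn records
    # ``(42,)`` and ``{"force": True}`` for the future Job.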
def _execute_direct(self):
assert self._generated_job
self._generated_job.perform()
class DelayableRecordset(object):
"""Allow to delay a method for a recordset (shortcut way)
Usage::
delayable = DelayableRecordset(recordset, priority=20)
delayable.method(args, kwargs)
The method call will be processed asynchronously in the job queue, with
the passed arguments.
This class will generally not be used directly, it is used internally
by :meth:`~odoo.addons.queue_job.models.base.Base.with_delay`
"""
__slots__ = ("delayable",)
def __init__(
self,
recordset,
priority=None,
eta=None,
max_retries=None,
description=None,
channel=None,
identity_key=None,
):
self.delayable = Delayable(
recordset,
priority=priority,
eta=eta,
max_retries=max_retries,
description=description,
channel=channel,
identity_key=identity_key,
)
@property
def recordset(self):
return self.delayable.recordset
def __getattr__(self, name):
def _delay_delayable(*args, **kwargs):
getattr(self.delayable, name)(*args, **kwargs).delay()
return self.delayable._generated_job
return _delay_delayable
def __str__(self):
return "DelayableRecordset(%s%s)" % (
self.delayable.recordset._name,
getattr(self.delayable.recordset, "_ids", ""),
)
__repr__ = __str__
| 31.820064 | 19,983 |
902 |
py
|
PYTHON
|
15.0
|
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
from odoo.tools.sql import column_exists, table_exists
def migrate(cr, version):
if table_exists(cr, "queue_job") and not column_exists(
cr, "queue_job", "exec_time"
):
# Disable trigger otherwise the update takes ages.
cr.execute(
"""
ALTER TABLE queue_job DISABLE TRIGGER queue_job_notify;
"""
)
cr.execute(
"""
ALTER TABLE queue_job ADD COLUMN exec_time double precision DEFAULT 0;
"""
)
cr.execute(
"""
UPDATE
queue_job
SET
exec_time = EXTRACT(EPOCH FROM (date_done - date_started));
"""
)
cr.execute(
"""
ALTER TABLE queue_job ENABLE TRIGGER queue_job_notify;
"""
)
| 27.333333 | 902 |
1,371 |
py
|
PYTHON
|
15.0
|
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
import logging
from odoo import SUPERUSER_ID, api
_logger = logging.getLogger(__name__)
def migrate(cr, version):
with api.Environment.manage():
env = api.Environment(cr, SUPERUSER_ID, {})
_logger.info("Computing exception name for failed jobs")
_compute_jobs_new_values(env)
def _compute_jobs_new_values(env):
for job in env["queue.job"].search(
[("state", "=", "failed"), ("exc_info", "!=", False)]
):
exception_details = _get_exception_details(job)
if exception_details:
job.update(exception_details)
def _get_exception_details(job):
for line in reversed(job.exc_info.splitlines()):
if _find_exception(line):
name, msg = line.split(":", 1)
return {
"exc_name": name.strip(),
"exc_message": msg.strip("()', \""),
}
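# For instance (hypothetical traceback tail), a last line such as
#   "odoo.exceptions.ValidationError: ('Invalid value',)"
# is split into exc_name "odoo.exceptions.ValidationError" and
# exc_message "Invalid value".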
def _find_exception(line):
# Just a list of common errors.
# If you want to target others, add your own migration step for your db.
exceptions = (
"Error:", # catch all well named exceptions
# other live instance errors found
"requests.exceptions.MissingSchema",
"botocore.errorfactory.NoSuchKey",
)
for exc in exceptions:
if exc in line:
return exc
| 29.170213 | 1,371 |
1,420 |
py
|
PYTHON
|
15.0
|
# license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
from odoo.tests import common
class TestWizards(common.TransactionCase):
def setUp(self):
super().setUp()
self.job = (
self.env["queue.job"]
.with_context(
_job_edit_sentinel=self.env["queue.job"].EDIT_SENTINEL,
)
.create(
{
"uuid": "test",
"user_id": self.env.user.id,
"state": "failed",
"model_name": "queue.job",
"method_name": "write",
"args": (),
}
)
)
def _wizard(self, model_name):
return (
self.env[model_name]
.with_context(
active_model=self.job._name,
active_ids=self.job.ids,
)
.create({})
)
def test_01_requeue(self):
wizard = self._wizard("queue.requeue.job")
wizard.requeue()
self.assertEqual(self.job.state, "pending")
def test_02_cancel(self):
wizard = self._wizard("queue.jobs.to.cancelled")
wizard.set_cancelled()
self.assertEqual(self.job.state, "cancelled")
def test_03_done(self):
wizard = self._wizard("queue.jobs.to.done")
wizard.set_done()
self.assertEqual(self.job.state, "done")
| 29.583333 | 1,420 |
2,284 |
py
|
PYTHON
|
15.0
|
# copyright 2020 Camptocamp
# license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
from odoo import exceptions
from odoo.tests import common
class TestJobFunction(common.TransactionCase):
def test_function_name_compute(self):
function = self.env["queue.job.function"].create(
{"model_id": self.env.ref("base.model_res_users").id, "method": "read"}
)
self.assertEqual(function.name, "<res.users>.read")
def test_function_name_inverse(self):
function = self.env["queue.job.function"].create({"name": "<res.users>.read"})
self.assertEqual(function.model_id.model, "res.users")
self.assertEqual(function.method, "read")
def test_function_name_inverse_invalid_regex(self):
with self.assertRaises(exceptions.UserError):
self.env["queue.job.function"].create({"name": "<res.users.read"})
def test_function_name_inverse_model_not_found(self):
with self.assertRaises(exceptions.UserError):
self.env["queue.job.function"].create(
{"name": "<this.model.does.not.exist>.read"}
)
def test_function_job_config(self):
channel = self.env["queue.job.channel"].create(
{"name": "foo", "parent_id": self.env.ref("queue_job.channel_root").id}
)
job_function = self.env["queue.job.function"].create(
{
"model_id": self.env.ref("base.model_res_users").id,
"method": "read",
"channel_id": channel.id,
"edit_retry_pattern": "{1: 2, 3: 4}",
"edit_related_action": (
'{"enable": True,'
' "func_name": "related_action_foo",'
' "kwargs": {"b": 1}}'
),
}
)
self.assertEqual(
self.env["queue.job.function"].job_config("<res.users>.read"),
self.env["queue.job.function"].JobConfig(
channel="root.foo",
retry_pattern={1: 2, 3: 4},
related_action_enable=True,
related_action_func_name="related_action_foo",
related_action_kwargs={"b": 1},
job_function_id=job_function.id,
),
)
| 40.070175 | 2,284 |
5,876 |
py
|
PYTHON
|
15.0
|
# copyright 2016 Camptocamp
# license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
import json
from datetime import date, datetime
from lxml import etree
from odoo.tests import common
# pylint: disable=odoo-addons-relative-import
# we are testing, we want to test as we were an external consumer of the API
from odoo.addons.queue_job.fields import JobDecoder, JobEncoder
class TestJson(common.TransactionCase):
def test_encoder_recordset(self):
demo_user = self.env.ref("base.user_demo")
context = demo_user.context_get()
partner = self.env(user=demo_user, context=context).ref("base.main_partner")
value = partner
value_json = json.dumps(value, cls=JobEncoder)
expected = {
"uid": demo_user.id,
"_type": "odoo_recordset",
"model": "res.partner",
"ids": [partner.id],
"su": False,
# no allowed context by default, must be changed in 16.0
"context": {},
}
self.assertEqual(json.loads(value_json), expected)
def test_encoder_recordset_list(self):
demo_user = self.env.ref("base.user_demo")
context = demo_user.context_get()
partner = self.env(user=demo_user, context=context).ref("base.main_partner")
value = ["a", 1, partner]
value_json = json.dumps(value, cls=JobEncoder)
expected = [
"a",
1,
{
"uid": demo_user.id,
"_type": "odoo_recordset",
"model": "res.partner",
"ids": [partner.id],
"su": False,
# no allowed context by default, must be changed in 16.0
"context": {},
},
]
self.assertEqual(json.loads(value_json), expected)
def test_decoder_recordset(self):
demo_user = self.env.ref("base.user_demo")
context = demo_user.context_get()
partner = self.env(user=demo_user).ref("base.main_partner")
value_json = (
'{"_type": "odoo_recordset",'
'"model": "res.partner",'
'"su": false,'
'"ids": [%s],"uid": %s, '
'"context": {"tz": "%s", "lang": "%s"}}'
% (partner.id, demo_user.id, context["tz"], context["lang"])
)
expected = partner
value = json.loads(value_json, cls=JobDecoder, env=self.env)
self.assertEqual(value, expected)
self.assertEqual(demo_user, expected.env.user)
def test_decoder_recordset_list(self):
demo_user = self.env.ref("base.user_demo")
context = demo_user.context_get()
partner = self.env(user=demo_user).ref("base.main_partner")
value_json = (
'["a", 1, '
'{"_type": "odoo_recordset",'
'"model": "res.partner",'
'"su": false,'
'"ids": [%s],"uid": %s, '
'"context": {"tz": "%s", "lang": "%s"}}]'
% (partner.id, demo_user.id, context["tz"], context["lang"])
)
expected = ["a", 1, partner]
value = json.loads(value_json, cls=JobDecoder, env=self.env)
self.assertEqual(value, expected)
self.assertEqual(demo_user, expected[2].env.user)
def test_decoder_recordset_list_without_user(self):
value_json = (
'["a", 1, {"_type": "odoo_recordset",' '"model": "res.users", "ids": [1]}]'
)
expected = ["a", 1, self.env.ref("base.user_root")]
value = json.loads(value_json, cls=JobDecoder, env=self.env)
self.assertEqual(value, expected)
def test_encoder_datetime(self):
value = ["a", 1, datetime(2017, 4, 19, 8, 48, 50, 1)]
value_json = json.dumps(value, cls=JobEncoder)
expected = [
"a",
1,
{"_type": "datetime_isoformat", "value": "2017-04-19T08:48:50.000001"},
]
self.assertEqual(json.loads(value_json), expected)
def test_decoder_datetime(self):
value_json = (
'["a", 1, {"_type": "datetime_isoformat",'
'"value": "2017-04-19T08:48:50.000001"}]'
)
expected = ["a", 1, datetime(2017, 4, 19, 8, 48, 50, 1)]
value = json.loads(value_json, cls=JobDecoder, env=self.env)
self.assertEqual(value, expected)
def test_encoder_date(self):
value = ["a", 1, date(2017, 4, 19)]
value_json = json.dumps(value, cls=JobEncoder)
expected = ["a", 1, {"_type": "date_isoformat", "value": "2017-04-19"}]
self.assertEqual(json.loads(value_json), expected)
def test_decoder_date(self):
value_json = '["a", 1, {"_type": "date_isoformat",' '"value": "2017-04-19"}]'
expected = ["a", 1, date(2017, 4, 19)]
value = json.loads(value_json, cls=JobDecoder, env=self.env)
self.assertEqual(value, expected)
def test_encoder_etree(self):
etree_el = etree.Element("root", attr="val")
etree_el.append(etree.Element("child", attr="val"))
value = ["a", 1, etree_el]
value_json = json.dumps(value, cls=JobEncoder)
expected = [
"a",
1,
{
"_type": "etree_element",
"value": '<root attr="val"><child attr="val"/></root>',
},
]
self.assertEqual(json.loads(value_json), expected)
def test_decoder_etree(self):
value_json = '["a", 1, {"_type": "etree_element", "value": \
"<root attr=\\"val\\"><child attr=\\"val\\"/></root>"}]'
etree_el = etree.Element("root", attr="val")
etree_el.append(etree.Element("child", attr="val"))
expected = ["a", 1, etree.tostring(etree_el)]
value = json.loads(value_json, cls=JobDecoder, env=self.env)
value[2] = etree.tostring(value[2])
self.assertEqual(value, expected)
| 38.657895 | 5,876 |
15,028 |
py
|
PYTHON
|
15.0
|
# Copyright 2019 Camptocamp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import doctest
import logging
import sys
import typing
from contextlib import contextmanager
from itertools import groupby
from operator import attrgetter
from unittest import TestCase, mock
from odoo.addons.queue_job.delay import Graph
# pylint: disable=odoo-addons-relative-import
from odoo.addons.queue_job.job import Job
@contextmanager
def trap_jobs():
"""Context Manager used to test enqueuing of jobs
    Trapping jobs allows splitting the tests into:
* the part that delays the job with the expected arguments in one test
* the execution of the job itself in a second test
When the jobs are trapped, they are not executed at all, however, we
can verify they have been enqueued with the correct arguments and
properties.
Then in a second test, we can call the job method directly with the
arguments to test.
    The context manager yields an instance of ``JobsTrap``, which provides
utilities and assert methods.
Example of method to test::
def button_that_uses_delayable_chain(self):
delayables = chain(
self.delayable(
channel="root.test",
description="Test",
eta=15,
identity_key=identity_exact,
max_retries=1,
priority=15,
).testing_method(1, foo=2),
self.delayable().testing_method('x', foo='y'),
self.delayable().no_description(),
)
delayables.delay()
Example of usage in a test::
with trap_jobs() as trap:
self.env['test.queue.job'].button_that_uses_delayable_chain()
trap.assert_jobs_count(3)
trap.assert_jobs_count(
2, only=self.env['test.queue.job'].testing_method
)
trap.assert_jobs_count(
1, only=self.env['test.queue.job'].no_description
)
trap.assert_enqueued_job(
self.env['test.queue.job'].testing_method,
args=(1,),
kwargs={"foo": 2},
properties=dict(
channel="root.test",
description="Test",
eta=15,
identity_key=identity_exact,
max_retries=1,
priority=15,
)
)
trap.assert_enqueued_job(
self.env['test.queue.job'].testing_method,
args=("x",),
kwargs={"foo": "y"},
)
trap.assert_enqueued_job(
self.env['test.queue.job'].no_description,
)
# optionally, you can perform the jobs synchronously (without going
# to the database)
jobs_tester.perform_enqueued_jobs()
"""
with mock.patch(
"odoo.addons.queue_job.delay.Job",
name="Job Class",
auto_spec=True,
) as job_cls_mock:
with JobsTrap(job_cls_mock) as trap:
yield trap
class JobCall(typing.NamedTuple):
method: typing.Callable
args: tuple
kwargs: dict
properties: dict
def __eq__(self, other):
if not isinstance(other, JobCall):
return NotImplemented
return (
self.method.__self__ == other.method.__self__
and self.method.__func__ == other.method.__func__
and self.args == other.args
and self.kwargs == other.kwargs
and self.properties == other.properties
)
class JobsTrap:
"""Used by ``trap_jobs()``, provide assert methods on the trapped jobs
    Look at the documentation of ``trap_jobs()`` for a usage example.
The ``store`` method of the Job instances is mocked so they are never
saved in database.
Helpers for tests:
* ``jobs_count``
* ``assert_jobs_count``
* ``assert_enqueued_job``
* ``perform_enqueued_jobs``
You can also access the list of calls that were made to enqueue the jobs in
the ``calls`` attribute, and the generated jobs in the ``enqueued_jobs``.
"""
def __init__(self, job_mock):
self.job_mock = job_mock
self.job_mock.side_effect = self._add_job
# 1 call == 1 job, they share the same position in the lists
self.calls = []
self.enqueued_jobs = []
self._store_patchers = []
self._test_case = TestCase()
def jobs_count(self, only=None):
"""Return the count of enqueued jobs
        ``only`` is an optional method on which the count is filtered
"""
if only:
return len(self._filtered_enqueued_jobs(only))
return len(self.enqueued_jobs)
def assert_jobs_count(self, expected, only=None):
"""Raise an assertion error if the count of enqueued jobs does not match
        ``only`` is an optional method on which the count is filtered
"""
self._test_case.assertEqual(self.jobs_count(only=only), expected)
def assert_enqueued_job(self, method, args=None, kwargs=None, properties=None):
"""Raise an assertion error if the expected method has not been enqueued
* ``method`` is the method (as method object) delayed as job
* ``args`` is a tuple of arguments passed to the job method
* ``kwargs`` is a dict of keyword arguments passed to the job method
* ``properties`` is a dict of job properties (priority, eta, ...)
        The args and the kwargs *must* match exactly what has been enqueued
in the job method. The properties are optional: if the job has been
enqueued with a custom description but the assert method is not called
with ``description`` in the properties, it still matches the call.
However, if a ``description`` is passed in the assert's properties, it
must match.
"""
if properties is None:
properties = {}
if args is None:
args = ()
if kwargs is None:
kwargs = {}
expected_call = JobCall(
method=method,
args=args,
kwargs=kwargs,
properties=properties,
)
actual_calls = []
for call in self.calls:
checked_properties = {
key: value
for key, value in call.properties.items()
if key in properties
}
# build copy of calls with only the properties that we want to
# check
actual_calls.append(
JobCall(
method=call.method,
args=call.args,
kwargs=call.kwargs,
properties=checked_properties,
)
)
if expected_call not in actual_calls:
raise AssertionError(
"Job %s was not enqueued.\n"
"Actual enqueued jobs:\n%s"
% (
self._format_job_call(expected_call),
"\n".join(
" * %s" % (self._format_job_call(call),)
for call in actual_calls
),
)
)
def perform_enqueued_jobs(self):
"""Perform the enqueued jobs synchronously"""
def by_graph(job):
return job.graph_uuid or ""
sorted_jobs = sorted(self.enqueued_jobs, key=by_graph)
for graph_uuid, jobs in groupby(sorted_jobs, key=by_graph):
if graph_uuid:
self._perform_graph_jobs(jobs)
else:
self._perform_single_jobs(jobs)
def _perform_single_jobs(self, jobs):
# we probably don't want to replicate a perfect order here, but at
# least respect the priority
for job in sorted(jobs, key=attrgetter("priority")):
job.perform()
def _perform_graph_jobs(self, jobs):
graph = Graph()
for job in jobs:
graph.add_vertex(job)
for parent in job.depends_on:
graph.add_edge(parent, job)
for job in graph.topological_sort():
job.perform()
def _add_job(self, *args, **kwargs):
job = Job(*args, **kwargs)
self.enqueued_jobs.append(job)
patcher = mock.patch.object(job, "store")
self._store_patchers.append(patcher)
patcher.start()
job_args = kwargs.pop("args", None) or ()
job_kwargs = kwargs.pop("kwargs", None) or {}
self.calls.append(
JobCall(
method=args[0],
args=job_args,
kwargs=job_kwargs,
properties=kwargs,
)
)
return job
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
for patcher in self._store_patchers:
patcher.stop()
def _filtered_enqueued_jobs(self, job_method):
enqueued_jobs = [
job
for job in self.enqueued_jobs
if job.func.__self__ == job_method.__self__
and job.func.__func__ == job_method.__func__
]
return enqueued_jobs
def _format_job_call(self, call):
method_all_args = []
if call.args:
method_all_args.append(", ".join("%s" % (arg,) for arg in call.args))
if call.kwargs:
method_all_args.append(
", ".join("%s=%s" % (key, value) for key, value in call.kwargs.items())
)
return "<%s>.%s(%s) with properties (%s)" % (
call.method.__self__,
call.method.__name__,
", ".join(method_all_args),
", ".join("%s=%s" % (key, value) for key, value in call.properties.items()),
)
def __repr__(self):
return repr(self.calls)
class JobCounter:
def __init__(self, env):
super().__init__()
self.env = env
self.existing = self.search_all()
def count_all(self):
return len(self.search_all())
def count_created(self):
return len(self.search_created())
def count_existing(self):
return len(self.existing)
def search_created(self):
return self.search_all() - self.existing
def search_all(self):
return self.env["queue.job"].search([])
class JobMixin:
def job_counter(self):
return JobCounter(self.env)
def perform_jobs(self, jobs):
for job in jobs.search_created():
Job.load(self.env, job.uuid).perform()
@contextmanager
def trap_jobs(self):
with trap_jobs() as trap:
yield trap
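# Illustrative sketch (not part of the upstream module) of a typical
# JobCounter/JobMixin usage in an ``odoo.tests.common.TransactionCase``;
# ``button_export`` is a hypothetical method assumed to enqueue one job:
#
#     class TestExport(TransactionCase, JobMixin):
#         def test_button_creates_job(self):
#             counter = self.job_counter()
#             self.record.button_export()
#             self.assertEqual(counter.count_created(), 1)
#             self.perform_jobs(counter)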
@contextmanager
def mock_with_delay():
"""Context Manager mocking ``with_delay()``
DEPRECATED: use ``trap_jobs()'``.
    Mocking this method means we can decouple the tests into:
    * the part that delays the job with the expected arguments
* the execution of the job itself
The first kind of test does not need to actually create the jobs in the
database, as we can inspect how the Mocks were called.
The second kind of test calls directly the method decorated by ``@job``
with the arguments that we want to test.
The context manager returns 2 mocks:
    * the first allows checking that with_delay() was called and with which
arguments
* the second to check which job method was called and with which arguments.
Example of test::
def test_export(self):
with mock_with_delay() as (delayable_cls, delayable):
# inside this method, there is a call
# partner.with_delay(priority=15).export_record('test')
self.record.run_export()
# check 'with_delay()' part:
self.assertEqual(delayable_cls.call_count, 1)
# arguments passed in 'with_delay()'
delay_args, delay_kwargs = delayable_cls.call_args
self.assertEqual(
delay_args, (self.env['res.partner'],)
)
self.assertDictEqual(delay_kwargs, {priority: 15})
# check what's passed to the job method 'export_record'
self.assertEqual(delayable.export_record.call_count, 1)
delay_args, delay_kwargs = delayable.export_record.call_args
self.assertEqual(delay_args, ('test',))
self.assertDictEqual(delay_kwargs, {})
An example of the first kind of test:
https://github.com/camptocamp/connector-jira/blob/0ca4261b3920d5e8c2ae4bb0fc352ea3f6e9d2cd/connector_jira/tests/test_batch_timestamp_import.py#L43-L76 # noqa
And the second kind:
https://github.com/camptocamp/connector-jira/blob/0ca4261b3920d5e8c2ae4bb0fc352ea3f6e9d2cd/connector_jira/tests/test_import_task.py#L34-L46 # noqa
"""
with mock.patch(
"odoo.addons.queue_job.models.base.DelayableRecordset",
name="DelayableRecordset",
spec=True,
) as delayable_cls:
# prepare the mocks
delayable = mock.MagicMock(name="DelayableBinding")
delayable_cls.return_value = delayable
yield delayable_cls, delayable
class OdooDocTestCase(doctest.DocTestCase):
"""
We need a custom DocTestCase class in order to:
- define test_tags to run as part of standard tests
- output a more meaningful test name than default "DocTestCase.runTest"
"""
def __init__(
self, doctest, optionflags=0, setUp=None, tearDown=None, checker=None, seq=0
):
super().__init__(
doctest._dt_test,
optionflags=optionflags,
setUp=setUp,
tearDown=tearDown,
checker=checker,
)
self.test_sequence = seq
def setUp(self):
"""Log an extra statement which test is started."""
super(OdooDocTestCase, self).setUp()
logging.getLogger(__name__).info("Running tests for %s", self._dt_test.name)
def load_doctests(module):
"""
    Generates a test-loading method for the doctests of the given module
https://docs.python.org/3/library/unittest.html#load-tests-protocol
"""
def load_tests(loader, tests, ignore):
"""
Apply the 'test_tags' attribute to each DocTestCase found by the DocTestSuite.
Also extend the DocTestCase class trivially to fit the class teardown
that Odoo backported for its own test classes from Python 3.8.
"""
if sys.version_info < (3, 8):
doctest.DocTestCase.doClassCleanups = lambda: None
doctest.DocTestCase.tearDown_exceptions = []
for idx, test in enumerate(doctest.DocTestSuite(module)):
odoo_test = OdooDocTestCase(test, seq=idx)
odoo_test.test_tags = {"standard", "at_install", "queue_job", "doctest"}
tests.addTest(odoo_test)
return tests
return load_tests
| 33.174393 | 15,028 |
350 |
py
|
PYTHON
|
15.0
|
# Copyright 2015-2016 Camptocamp SA
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
# pylint: disable=odoo-addons-relative-import
# we are testing, we want to test as we were an external consumer of the API
from odoo.addons.queue_job.jobrunner import runner
from .common import load_doctests
load_tests = load_doctests(runner)
| 35 | 350 |
354 |
py
|
PYTHON
|
15.0
|
# Copyright 2015-2016 Camptocamp SA
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
# pylint: disable=odoo-addons-relative-import
# we are testing, we want to test as we were an external consumer of the API
from odoo.addons.queue_job.jobrunner import channels
from .common import load_doctests
load_tests = load_doctests(channels)
| 35.4 | 354 |
2,284 |
py
|
PYTHON
|
15.0
|
# copyright 2018 Camptocamp
# license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
from psycopg2 import IntegrityError
import odoo
from odoo.tests import common
class TestJobChannel(common.TransactionCase):
def setUp(self):
super().setUp()
self.Channel = self.env["queue.job.channel"]
self.root_channel = self.Channel.search([("name", "=", "root")])
def test_channel_new(self):
channel = self.Channel.new()
self.assertFalse(channel.name)
self.assertFalse(channel.complete_name)
def test_channel_create(self):
channel = self.Channel.create(
{"name": "test", "parent_id": self.root_channel.id}
)
self.assertEqual(channel.name, "test")
self.assertEqual(channel.complete_name, "root.test")
channel2 = self.Channel.create({"name": "test", "parent_id": channel.id})
self.assertEqual(channel2.name, "test")
self.assertEqual(channel2.complete_name, "root.test.test")
@odoo.tools.mute_logger("odoo.sql_db")
def test_channel_complete_name_uniq(self):
channel = self.Channel.create(
{"name": "test", "parent_id": self.root_channel.id}
)
self.assertEqual(channel.name, "test")
self.assertEqual(channel.complete_name, "root.test")
self.Channel.create({"name": "test", "parent_id": self.root_channel.id})
        # flush() processes the pending recomputations and pushes the pending
        # updates to the database; it is normally called on commit.
        # The 'with self.assertRaises(IntegrityError)' context manager is
        # purposely not used here: it performs its own 'flush()', so the
        # exception would be raised before reaching the explicit
        # 'self.env["base"].flush()' line. We therefore check for the
        # expected IntegrityError manually.
        try:
            self.env["base"].flush()
        except IntegrityError as ex:
            self.assertIn("queue_job_channel_name_uniq", ex.pgerror)
        else:
            self.fail("expected IntegrityError for duplicated channel name")
def test_channel_name_get(self):
channel = self.Channel.create(
{"name": "test", "parent_id": self.root_channel.id}
)
self.assertEqual(channel.name_get(), [(channel.id, "root.test")])
| 38.711864 | 2,284 |
943 |
py
|
PYTHON
|
15.0
|
# copyright 2020 Camptocamp
# license lgpl-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
from odoo import exceptions
from odoo.tests import common
class TestJobWriteProtected(common.TransactionCase):
def test_create_error(self):
with self.assertRaises(exceptions.AccessError):
self.env["queue.job"].create(
{"uuid": "test", "model_name": "res.partner", "method_name": "write"}
)
def test_write_protected_field_error(self):
job_ = self.env["res.partner"].with_delay().create({"name": "test"})
db_job = job_.db_record()
with self.assertRaises(exceptions.AccessError):
db_job.method_name = "unlink"
def test_write_allow_no_protected_field_error(self):
job_ = self.env["res.partner"].with_delay().create({"name": "test"})
db_job = job_.db_record()
db_job.priority = 30
self.assertEqual(db_job.priority, 30)
| 37.72 | 943 |
10,049 |
py
|
PYTHON
|
15.0
|
# copyright 2019 Camptocamp
# license agpl-3.0 or later (http://www.gnu.org/licenses/agpl.html)
import unittest
from unittest import mock
from odoo.addons.queue_job.delay import Delayable, DelayableGraph
class TestDelayable(unittest.TestCase):
def setUp(self):
super().setUp()
self.recordset = mock.MagicMock(name="recordset")
def test_delayable_set(self):
dl = Delayable(self.recordset)
dl.set(priority=15)
self.assertEqual(dl.priority, 15)
dl.set({"priority": 20, "description": "test"})
self.assertEqual(dl.priority, 20)
self.assertEqual(dl.description, "test")
def test_delayable_set_unknown(self):
dl = Delayable(self.recordset)
with self.assertRaises(ValueError):
dl.set(foo=15)
def test_graph_add_vertex_edge(self):
graph = DelayableGraph()
graph.add_vertex("a")
self.assertEqual(graph._graph, {"a": set()})
graph.add_edge("a", "b")
self.assertEqual(graph._graph, {"a": {"b"}, "b": set()})
graph.add_edge("b", "c")
self.assertEqual(graph._graph, {"a": {"b"}, "b": {"c"}, "c": set()})
def test_graph_vertices(self):
graph = DelayableGraph({"a": {"b"}, "b": {"c"}, "c": set()})
self.assertEqual(graph.vertices(), {"a", "b", "c"})
def test_graph_edges(self):
graph = DelayableGraph(
{"a": {"b"}, "b": {"c", "d"}, "c": {"e"}, "d": set(), "e": set()}
)
self.assertEqual(
sorted(graph.edges()),
sorted(
[
("a", "b"),
("b", "c"),
("b", "d"),
("c", "e"),
]
),
)
def test_graph_connect(self):
node_tail = Delayable(self.recordset)
node_tail2 = Delayable(self.recordset)
node_middle = Delayable(self.recordset)
node_top = Delayable(self.recordset)
node_middle.on_done(node_tail)
node_middle.on_done(node_tail2)
node_top.on_done(node_middle)
collected = node_top._graph._connect_graphs()
self.assertEqual(
collected._graph,
{
node_tail: set(),
node_tail2: set(),
node_middle: {node_tail, node_tail2},
node_top: {node_middle},
},
)
def test_graph_paths(self):
graph = DelayableGraph(
{"a": {"b"}, "b": {"c", "d"}, "c": {"e"}, "d": set(), "e": set()}
)
paths = list(graph.paths("a"))
self.assertEqual(sorted(paths), sorted([["a", "b", "d"], ["a", "b", "c", "e"]]))
paths = list(graph.paths("b"))
self.assertEqual(sorted(paths), sorted([["b", "d"], ["b", "c", "e"]]))
paths = list(graph.paths("c"))
self.assertEqual(paths, [["c", "e"]])
paths = list(graph.paths("d"))
self.assertEqual(paths, [["d"]])
paths = list(graph.paths("e"))
self.assertEqual(paths, [["e"]])
def test_graph_repr(self):
graph = DelayableGraph(
{"a": {"b"}, "b": {"c", "d"}, "c": {"e"}, "d": set(), "e": set()}
)
actual = repr(graph)
expected = ["'a' → 'b' → 'c' → 'e'", "'a' → 'b' → 'd'"]
self.assertEqual(sorted(actual.split("\n")), expected)
def test_graph_topological_sort(self):
# the graph is an example from
# https://en.wikipedia.org/wiki/Topological_sorting
# if you want a visual representation
graph = DelayableGraph(
{
5: {11},
7: {11, 8},
3: {8, 10},
11: {2, 9, 10},
2: set(),
8: {9},
9: set(),
10: set(),
}
)
# these are all the pre-computed combinations that
# respect the dependencies order
valid_solutions = [
[3, 5, 7, 8, 11, 2, 9, 10],
[3, 5, 7, 8, 11, 2, 10, 9],
[3, 5, 7, 8, 11, 9, 2, 10],
[3, 5, 7, 8, 11, 9, 10, 2],
[3, 5, 7, 8, 11, 10, 2, 9],
[3, 5, 7, 8, 11, 10, 9, 2],
[3, 5, 7, 11, 2, 8, 9, 10],
[3, 5, 7, 11, 2, 8, 10, 9],
[3, 5, 7, 11, 2, 10, 8, 9],
[3, 5, 7, 11, 8, 2, 9, 10],
[3, 5, 7, 11, 8, 2, 10, 9],
[3, 5, 7, 11, 8, 9, 2, 10],
[3, 5, 7, 11, 8, 9, 10, 2],
[3, 5, 7, 11, 8, 10, 2, 9],
[3, 5, 7, 11, 8, 10, 9, 2],
[3, 5, 7, 11, 10, 2, 8, 9],
[3, 5, 7, 11, 10, 8, 2, 9],
[3, 5, 7, 11, 10, 8, 9, 2],
[3, 7, 5, 8, 11, 2, 9, 10],
[3, 7, 5, 8, 11, 2, 10, 9],
[3, 7, 5, 8, 11, 9, 2, 10],
[3, 7, 5, 8, 11, 9, 10, 2],
[3, 7, 5, 8, 11, 10, 2, 9],
[3, 7, 5, 8, 11, 10, 9, 2],
[3, 7, 5, 11, 2, 8, 9, 10],
[3, 7, 5, 11, 2, 8, 10, 9],
[3, 7, 5, 11, 2, 10, 8, 9],
[3, 7, 5, 11, 8, 2, 9, 10],
[3, 7, 5, 11, 8, 2, 10, 9],
[3, 7, 5, 11, 8, 9, 2, 10],
[3, 7, 5, 11, 8, 9, 10, 2],
[3, 7, 5, 11, 8, 10, 2, 9],
[3, 7, 5, 11, 8, 10, 9, 2],
[3, 7, 5, 11, 10, 2, 8, 9],
[3, 7, 5, 11, 10, 8, 2, 9],
[3, 7, 5, 11, 10, 8, 9, 2],
[3, 7, 8, 5, 11, 2, 9, 10],
[3, 7, 8, 5, 11, 2, 10, 9],
[3, 7, 8, 5, 11, 9, 2, 10],
[3, 7, 8, 5, 11, 9, 10, 2],
[3, 7, 8, 5, 11, 10, 2, 9],
[3, 7, 8, 5, 11, 10, 9, 2],
[5, 3, 7, 8, 11, 2, 9, 10],
[5, 3, 7, 8, 11, 2, 10, 9],
[5, 3, 7, 8, 11, 9, 2, 10],
[5, 3, 7, 8, 11, 9, 10, 2],
[5, 3, 7, 8, 11, 10, 2, 9],
[5, 3, 7, 8, 11, 10, 9, 2],
[5, 3, 7, 11, 2, 8, 9, 10],
[5, 3, 7, 11, 2, 8, 10, 9],
[5, 3, 7, 11, 2, 10, 8, 9],
[5, 3, 7, 11, 8, 2, 9, 10],
[5, 3, 7, 11, 8, 2, 10, 9],
[5, 3, 7, 11, 8, 9, 2, 10],
[5, 3, 7, 11, 8, 9, 10, 2],
[5, 3, 7, 11, 8, 10, 2, 9],
[5, 3, 7, 11, 8, 10, 9, 2],
[5, 3, 7, 11, 10, 2, 8, 9],
[5, 3, 7, 11, 10, 8, 2, 9],
[5, 3, 7, 11, 10, 8, 9, 2],
[5, 7, 3, 8, 11, 2, 9, 10],
[5, 7, 3, 8, 11, 2, 10, 9],
[5, 7, 3, 8, 11, 9, 2, 10],
[5, 7, 3, 8, 11, 9, 10, 2],
[5, 7, 3, 8, 11, 10, 2, 9],
[5, 7, 3, 8, 11, 10, 9, 2],
[5, 7, 3, 11, 2, 8, 9, 10],
[5, 7, 3, 11, 2, 8, 10, 9],
[5, 7, 3, 11, 2, 10, 8, 9],
[5, 7, 3, 11, 8, 2, 9, 10],
[5, 7, 3, 11, 8, 2, 10, 9],
[5, 7, 3, 11, 8, 9, 2, 10],
[5, 7, 3, 11, 8, 9, 10, 2],
[5, 7, 3, 11, 8, 10, 2, 9],
[5, 7, 3, 11, 8, 10, 9, 2],
[5, 7, 3, 11, 10, 2, 8, 9],
[5, 7, 3, 11, 10, 8, 2, 9],
[5, 7, 3, 11, 10, 8, 9, 2],
[5, 7, 11, 2, 3, 8, 9, 10],
[5, 7, 11, 2, 3, 8, 10, 9],
[5, 7, 11, 2, 3, 10, 8, 9],
[5, 7, 11, 3, 2, 8, 9, 10],
[5, 7, 11, 3, 2, 8, 10, 9],
[5, 7, 11, 3, 2, 10, 8, 9],
[5, 7, 11, 3, 8, 2, 9, 10],
[5, 7, 11, 3, 8, 2, 10, 9],
[5, 7, 11, 3, 8, 9, 2, 10],
[5, 7, 11, 3, 8, 9, 10, 2],
[5, 7, 11, 3, 8, 10, 2, 9],
[5, 7, 11, 3, 8, 10, 9, 2],
[5, 7, 11, 3, 10, 2, 8, 9],
[5, 7, 11, 3, 10, 8, 2, 9],
[5, 7, 11, 3, 10, 8, 9, 2],
[7, 3, 5, 8, 11, 2, 9, 10],
[7, 3, 5, 8, 11, 2, 10, 9],
[7, 3, 5, 8, 11, 9, 2, 10],
[7, 3, 5, 8, 11, 9, 10, 2],
[7, 3, 5, 8, 11, 10, 2, 9],
[7, 3, 5, 8, 11, 10, 9, 2],
[7, 3, 5, 11, 2, 8, 9, 10],
[7, 3, 5, 11, 2, 8, 10, 9],
[7, 3, 5, 11, 2, 10, 8, 9],
[7, 3, 5, 11, 8, 2, 9, 10],
[7, 3, 5, 11, 8, 2, 10, 9],
[7, 3, 5, 11, 8, 9, 2, 10],
[7, 3, 5, 11, 8, 9, 10, 2],
[7, 3, 5, 11, 8, 10, 2, 9],
[7, 3, 5, 11, 8, 10, 9, 2],
[7, 3, 5, 11, 10, 2, 8, 9],
[7, 3, 5, 11, 10, 8, 2, 9],
[7, 3, 5, 11, 10, 8, 9, 2],
[7, 3, 8, 5, 11, 2, 9, 10],
[7, 3, 8, 5, 11, 2, 10, 9],
[7, 3, 8, 5, 11, 9, 2, 10],
[7, 3, 8, 5, 11, 9, 10, 2],
[7, 3, 8, 5, 11, 10, 2, 9],
[7, 3, 8, 5, 11, 10, 9, 2],
[7, 5, 3, 8, 11, 2, 9, 10],
[7, 5, 3, 8, 11, 2, 10, 9],
[7, 5, 3, 8, 11, 9, 2, 10],
[7, 5, 3, 8, 11, 9, 10, 2],
[7, 5, 3, 8, 11, 10, 2, 9],
[7, 5, 3, 8, 11, 10, 9, 2],
[7, 5, 3, 11, 2, 8, 9, 10],
[7, 5, 3, 11, 2, 8, 10, 9],
[7, 5, 3, 11, 2, 10, 8, 9],
[7, 5, 3, 11, 8, 2, 9, 10],
[7, 5, 3, 11, 8, 2, 10, 9],
[7, 5, 3, 11, 8, 9, 2, 10],
[7, 5, 3, 11, 8, 9, 10, 2],
[7, 5, 3, 11, 8, 10, 2, 9],
[7, 5, 3, 11, 8, 10, 9, 2],
[7, 5, 3, 11, 10, 2, 8, 9],
[7, 5, 3, 11, 10, 8, 2, 9],
[7, 5, 3, 11, 10, 8, 9, 2],
[7, 5, 11, 2, 3, 8, 9, 10],
[7, 5, 11, 2, 3, 8, 10, 9],
[7, 5, 11, 2, 3, 10, 8, 9],
[7, 5, 11, 3, 2, 8, 9, 10],
[7, 5, 11, 3, 2, 8, 10, 9],
[7, 5, 11, 3, 2, 10, 8, 9],
[7, 5, 11, 3, 8, 2, 9, 10],
[7, 5, 11, 3, 8, 2, 10, 9],
[7, 5, 11, 3, 8, 9, 2, 10],
[7, 5, 11, 3, 8, 9, 10, 2],
[7, 5, 11, 3, 8, 10, 2, 9],
[7, 5, 11, 3, 8, 10, 9, 2],
[7, 5, 11, 3, 10, 2, 8, 9],
[7, 5, 11, 3, 10, 8, 2, 9],
[7, 5, 11, 3, 10, 8, 9, 2],
]
self.assertIn(list(graph.topological_sort()), valid_solutions)
| 37.04428 | 10,039 |
18,026 |
py
|
PYTHON
|
15.0
|
# Copyright 2013-2020 Camptocamp SA
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
import logging
import random
from datetime import datetime, timedelta
from odoo import _, api, exceptions, fields, models
from odoo.osv import expression
from odoo.tools import config, html_escape
from odoo.addons.base_sparse_field.models.fields import Serialized
from ..delay import Graph
from ..exception import JobError
from ..fields import JobSerialized
from ..job import (
CANCELLED,
DONE,
FAILED,
PENDING,
STARTED,
STATES,
WAIT_DEPENDENCIES,
Job,
)
_logger = logging.getLogger(__name__)
class QueueJob(models.Model):
"""Model storing the jobs to be executed."""
_name = "queue.job"
_description = "Queue Job"
_inherit = ["mail.thread", "mail.activity.mixin"]
_log_access = False
_order = "date_created DESC, date_done DESC"
_removal_interval = 30 # days
_default_related_action = "related_action_open_record"
# This must be passed in a context key "_job_edit_sentinel" to write on
# protected fields. It protects against crafting "queue.job" records from
# RPC (e.g. on internal methods). When ``with_delay`` is used, the sentinel
# is set.
EDIT_SENTINEL = object()
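    # For instance, trusted code creating a job record directly (as the test
    # setup in this addon does) must pass the sentinel explicitly:
    #   self.env["queue.job"].with_context(
    #       _job_edit_sentinel=self.env["queue.job"].EDIT_SENTINEL
    #   ).create(vals)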
_protected_fields = (
"uuid",
"name",
"date_created",
"model_name",
"method_name",
"func_string",
"channel_method_name",
"job_function_id",
"records",
"args",
"kwargs",
)
uuid = fields.Char(string="UUID", readonly=True, index=True, required=True)
graph_uuid = fields.Char(
string="Graph UUID",
readonly=True,
index=True,
help="Single shared identifier of a Graph. Empty for a single job.",
)
user_id = fields.Many2one(comodel_name="res.users", string="User ID")
company_id = fields.Many2one(
comodel_name="res.company", string="Company", index=True
)
name = fields.Char(string="Description", readonly=True)
model_name = fields.Char(string="Model", readonly=True)
method_name = fields.Char(readonly=True)
# record_ids field is only for backward compatibility (e.g. used in related
# actions), can be removed (replaced by "records") in 14.0
record_ids = JobSerialized(compute="_compute_record_ids", base_type=list)
records = JobSerialized(
string="Record(s)",
readonly=True,
base_type=models.BaseModel,
)
dependencies = Serialized(readonly=True)
# dependency graph as expected by the field widget
dependency_graph = Serialized(compute="_compute_dependency_graph")
graph_jobs_count = fields.Integer(compute="_compute_graph_jobs_count")
args = JobSerialized(readonly=True, base_type=tuple)
kwargs = JobSerialized(readonly=True, base_type=dict)
func_string = fields.Char(string="Task", readonly=True)
state = fields.Selection(STATES, readonly=True, required=True, index=True)
priority = fields.Integer()
exc_name = fields.Char(string="Exception", readonly=True)
exc_message = fields.Char(string="Exception Message", readonly=True)
exc_info = fields.Text(string="Exception Info", readonly=True)
result = fields.Text(readonly=True)
date_created = fields.Datetime(string="Created Date", readonly=True)
date_started = fields.Datetime(string="Start Date", readonly=True)
date_enqueued = fields.Datetime(string="Enqueue Time", readonly=True)
date_done = fields.Datetime(readonly=True)
exec_time = fields.Float(
string="Execution Time (avg)",
group_operator="avg",
help="Time required to execute this job in seconds. Average when grouped.",
)
date_cancelled = fields.Datetime(readonly=True)
eta = fields.Datetime(string="Execute only after")
retry = fields.Integer(string="Current try")
max_retries = fields.Integer(
string="Max. retries",
help="The job will fail if the number of tries reach the "
"max. retries.\n"
"Retries are infinite when empty.",
)
# FIXME the name of this field is very confusing
channel_method_name = fields.Char(string="Complete Method Name", readonly=True)
job_function_id = fields.Many2one(
comodel_name="queue.job.function",
string="Job Function",
readonly=True,
)
channel = fields.Char(index=True)
identity_key = fields.Char(readonly=True)
worker_pid = fields.Integer(readonly=True)
def init(self):
self._cr.execute(
"SELECT indexname FROM pg_indexes WHERE indexname = %s ",
("queue_job_identity_key_state_partial_index",),
)
if not self._cr.fetchone():
self._cr.execute(
"CREATE INDEX queue_job_identity_key_state_partial_index "
"ON queue_job (identity_key) WHERE state in ('pending', "
"'enqueued') AND identity_key IS NOT NULL;"
)
@api.depends("records")
def _compute_record_ids(self):
for record in self:
record.record_ids = record.records.ids
@api.depends("dependencies")
def _compute_dependency_graph(self):
jobs_groups = self.env["queue.job"].read_group(
[
(
"graph_uuid",
"in",
[uuid for uuid in self.mapped("graph_uuid") if uuid],
)
],
["graph_uuid", "ids:array_agg(id)"],
["graph_uuid"],
)
ids_per_graph_uuid = {
group["graph_uuid"]: group["ids"] for group in jobs_groups
}
for record in self:
if not record.graph_uuid:
record.dependency_graph = {}
continue
graph_jobs = self.browse(ids_per_graph_uuid.get(record.graph_uuid) or [])
if not graph_jobs:
record.dependency_graph = {}
continue
graph_ids = {graph_job.uuid: graph_job.id for graph_job in graph_jobs}
graph_jobs_by_ids = {graph_job.id: graph_job for graph_job in graph_jobs}
graph = Graph()
for graph_job in graph_jobs:
graph.add_vertex(graph_job.id)
for parent_uuid in graph_job.dependencies["depends_on"]:
parent_id = graph_ids.get(parent_uuid)
if not parent_id:
continue
graph.add_edge(parent_id, graph_job.id)
for child_uuid in graph_job.dependencies["reverse_depends_on"]:
child_id = graph_ids.get(child_uuid)
if not child_id:
continue
graph.add_edge(graph_job.id, child_id)
record.dependency_graph = {
# list of ids
"nodes": [
graph_jobs_by_ids[graph_id]._dependency_graph_vis_node()
for graph_id in graph.vertices()
],
# list of tuples (from, to)
"edges": graph.edges(),
}
def _dependency_graph_vis_node(self):
"""Return the node as expected by the JobDirectedGraph widget"""
default = ("#D2E5FF", "#2B7CE9")
colors = {
DONE: ("#C2FABC", "#4AD63A"),
FAILED: ("#FB7E81", "#FA0A10"),
STARTED: ("#FFFF00", "#FFA500"),
}
return {
"id": self.id,
"title": "<strong>%s</strong><br/>%s"
% (
html_escape(self.display_name),
html_escape(self.func_string),
),
"color": colors.get(self.state, default)[0],
"border": colors.get(self.state, default)[1],
"shadow": True,
}
def _compute_graph_jobs_count(self):
jobs_groups = self.env["queue.job"].read_group(
[
(
"graph_uuid",
"in",
[uuid for uuid in self.mapped("graph_uuid") if uuid],
)
],
["graph_uuid"],
["graph_uuid"],
)
count_per_graph_uuid = {
group["graph_uuid"]: group["graph_uuid_count"] for group in jobs_groups
}
for record in self:
record.graph_jobs_count = count_per_graph_uuid.get(record.graph_uuid) or 0
@api.model_create_multi
def create(self, vals_list):
if self.env.context.get("_job_edit_sentinel") is not self.EDIT_SENTINEL:
# Prevent to create a queue.job record "raw" from RPC.
# ``with_delay()`` must be used.
raise exceptions.AccessError(
_("Queue jobs must be created by calling 'with_delay()'.")
)
return super(
QueueJob,
self.with_context(mail_create_nolog=True, mail_create_nosubscribe=True),
).create(vals_list)
def write(self, vals):
if self.env.context.get("_job_edit_sentinel") is not self.EDIT_SENTINEL:
write_on_protected_fields = [
fieldname for fieldname in vals if fieldname in self._protected_fields
]
if write_on_protected_fields:
raise exceptions.AccessError(
_("Not allowed to change field(s): {}").format(
write_on_protected_fields
)
)
different_user_jobs = self.browse()
if vals.get("user_id"):
different_user_jobs = self.filtered(
lambda records: records.env.user.id != vals["user_id"]
)
if vals.get("state") == "failed":
self._message_post_on_failure()
result = super().write(vals)
for record in different_user_jobs:
# the user is stored in the env of the record, but we still want to
# have a stored user_id field to be able to search/groupby, so
# synchronize the env of records with user_id
super(QueueJob, record).write(
{"records": record.records.with_user(vals["user_id"])}
)
return result
def open_related_action(self):
"""Open the related action associated to the job"""
self.ensure_one()
job = Job.load(self.env, self.uuid)
action = job.related_action()
if action is None:
raise exceptions.UserError(_("No action available for this job"))
return action
def open_graph_jobs(self):
"""Return action that opens all jobs of the same graph"""
self.ensure_one()
jobs = self.env["queue.job"].search([("graph_uuid", "=", self.graph_uuid)])
action = self.env["ir.actions.act_window"]._for_xml_id(
"queue_job.action_queue_job"
)
action.update(
{
"name": _("Jobs for graph %s") % (self.graph_uuid),
"context": {},
"domain": [("id", "in", jobs.ids)],
}
)
return action
def _change_job_state(self, state, result=None):
"""Change the state of the `Job` object
Changing the state of the Job will automatically change some fields
(date, result, ...).
"""
for record in self:
job_ = Job.load(record.env, record.uuid)
if state == DONE:
job_.set_done(result=result)
job_.store()
record.env["queue.job"].flush()
job_.enqueue_waiting()
elif state == PENDING:
job_.set_pending(result=result)
job_.store()
elif state == CANCELLED:
job_.set_cancelled(result=result)
job_.store()
else:
raise ValueError("State not supported: %s" % state)
def button_done(self):
result = _("Manually set to done by %s") % self.env.user.name
self._change_job_state(DONE, result=result)
return True
def button_cancelled(self):
result = _("Cancelled by %s") % self.env.user.name
self._change_job_state(CANCELLED, result=result)
return True
def requeue(self):
jobs_to_requeue = self.filtered(lambda job_: job_.state != WAIT_DEPENDENCIES)
jobs_to_requeue._change_job_state(PENDING)
return True
def _message_post_on_failure(self):
# subscribe the users now to avoid to subscribe them
# at every job creation
domain = self._subscribe_users_domain()
base_users = self.env["res.users"].search(domain)
for record in self:
users = base_users | record.user_id
record.message_subscribe(partner_ids=users.mapped("partner_id").ids)
msg = record._message_failed_job()
if msg:
record.message_post(body=msg, subtype_xmlid="queue_job.mt_job_failed")
def _subscribe_users_domain(self):
"""Subscribe all users having the 'Queue Job Manager' group"""
group = self.env.ref("queue_job.group_queue_job_manager")
if not group:
return None
companies = self.mapped("company_id")
domain = [("groups_id", "=", group.id)]
if companies:
domain.append(("company_id", "in", companies.ids))
return domain
def _message_failed_job(self):
"""Return a message which will be posted on the job when it is failed.
It can be inherited to allow more precise messages based on the
        exception information.
If nothing is returned, no message will be posted.
"""
self.ensure_one()
return _(
"Something bad happened during the execution of the job. "
"More details in the 'Exception Information' section."
)
def _needaction_domain_get(self):
"""Returns the domain to filter records that require an action
        :return: domain or False if no action
"""
return [("state", "=", "failed")]
def autovacuum(self):
"""Delete all jobs done based on the removal interval defined on the
channel
Called from a cron.
"""
for channel in self.env["queue.job.channel"].search([]):
deadline = datetime.now() - timedelta(days=int(channel.removal_interval))
while True:
jobs = self.search(
[
"|",
("date_done", "<=", deadline),
("date_cancelled", "<=", deadline),
("channel", "=", channel.complete_name),
],
limit=1000,
)
if jobs:
jobs.unlink()
if not config["test_enable"]:
self.env.cr.commit() # pylint: disable=E8102
else:
break
return True
def requeue_stuck_jobs(self, enqueued_delta=5, started_delta=0):
"""Fix jobs that are in a bad states
:param in_queue_delta: lookup time in minutes for jobs
that are in enqueued state
:param started_delta: lookup time in minutes for jobs
that are in enqueued state,
0 means that it is not checked
"""
self._get_stuck_jobs_to_requeue(
enqueued_delta=enqueued_delta, started_delta=started_delta
).requeue()
return True
def _get_stuck_jobs_domain(self, queue_dl, started_dl):
domain = []
now = fields.datetime.now()
if queue_dl:
queue_dl = now - timedelta(minutes=queue_dl)
domain.append(
[
"&",
("date_enqueued", "<=", fields.Datetime.to_string(queue_dl)),
("state", "=", "enqueued"),
]
)
if started_dl:
started_dl = now - timedelta(minutes=started_dl)
domain.append(
[
"&",
("date_started", "<=", fields.Datetime.to_string(started_dl)),
("state", "=", "started"),
]
)
if not domain:
raise exceptions.ValidationError(
_("If both parameters are 0, ALL jobs will be requeued!")
)
return expression.OR(domain)
def _get_stuck_jobs_to_requeue(self, enqueued_delta, started_delta):
job_model = self.env["queue.job"]
stuck_jobs = job_model.search(
self._get_stuck_jobs_domain(enqueued_delta, started_delta)
)
return stuck_jobs
def related_action_open_record(self):
"""Open a form view with the record(s) of the job.
For instance, for a job on a ``product.product``, it will open a
``product.product`` form view with the product record(s) concerned by
the job. If the job concerns more than one record, it opens them in a
list.
This is the default related action.
"""
self.ensure_one()
records = self.records.exists()
if not records:
return None
action = {
"name": _("Related Record"),
"type": "ir.actions.act_window",
"view_mode": "form",
"res_model": records._name,
}
if len(records) == 1:
action["res_id"] = records.id
else:
action.update(
{
"name": _("Related Records"),
"view_mode": "tree,form",
"domain": [("id", "in", records.ids)],
}
)
return action
def _test_job(self, failure_rate=0):
_logger.info("Running test job.")
if random.random() <= failure_rate:
raise JobError("Job failed")
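# Minimal usage sketch (not part of the upstream file): enqueuing the test job
# defined above through the ``with_delay`` API provided by this module. The
# channel name, description and failure rate are illustrative.
def _example_enqueue_test_job(env):
    # The job is stored in queue_job and picked up asynchronously by the
    # jobrunner; a failure_rate of 0.2 makes it fail roughly one time in five.
    env["queue.job"].with_delay(
        channel="root", description="demo job"
    )._test_job(failure_rate=0.2)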
| 35.765873 | 18,026 |
3,417 | py | PYTHON | 15.0 |
# Copyright 2013-2020 Camptocamp SA
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
from odoo import _, api, exceptions, fields, models
class QueueJobChannel(models.Model):
_name = "queue.job.channel"
_description = "Job Channels"
name = fields.Char()
complete_name = fields.Char(
compute="_compute_complete_name", store=True, readonly=True, recursive=True
)
parent_id = fields.Many2one(
comodel_name="queue.job.channel", string="Parent Channel", ondelete="restrict"
)
job_function_ids = fields.One2many(
comodel_name="queue.job.function",
inverse_name="channel_id",
string="Job Functions",
)
removal_interval = fields.Integer(
default=lambda self: self.env["queue.job"]._removal_interval, required=True
)
_sql_constraints = [
("name_uniq", "unique(complete_name)", "Channel complete name must be unique")
]
@api.depends("name", "parent_id.complete_name")
def _compute_complete_name(self):
for record in self:
if not record.name:
complete_name = "" # new record
elif record.parent_id:
complete_name = ".".join([record.parent_id.complete_name, record.name])
else:
complete_name = record.name
record.complete_name = complete_name
@api.constrains("parent_id", "name")
def parent_required(self):
for record in self:
if record.name != "root" and not record.parent_id:
raise exceptions.ValidationError(_("Parent channel required."))
@api.model_create_multi
def create(self, vals_list):
records = self.browse()
if self.env.context.get("install_mode"):
# installing a module that creates a channel: rebinds the channel
# to an existing one (likely we already had the channel created by
# the @job decorator previously)
new_vals_list = []
for vals in vals_list:
name = vals.get("name")
parent_id = vals.get("parent_id")
if name and parent_id:
existing = self.search(
[("name", "=", name), ("parent_id", "=", parent_id)]
)
if existing:
if not existing.get_metadata()[0].get("noupdate"):
existing.write(vals)
records |= existing
continue
new_vals_list.append(vals)
vals_list = new_vals_list
records |= super().create(vals_list)
return records
def write(self, values):
for channel in self:
if (
not self.env.context.get("install_mode")
and channel.name == "root"
and ("name" in values or "parent_id" in values)
):
raise exceptions.UserError(_("Cannot change the root channel"))
return super().write(values)
def unlink(self):
for channel in self:
if channel.name == "root":
raise exceptions.UserError(_("Cannot remove the root channel"))
return super().unlink()
def name_get(self):
result = []
for record in self:
result.append((record.id, record.complete_name))
return result
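# Minimal usage sketch (not part of the upstream file): creating a sub-channel
# under the root channel. ``complete_name`` is computed as "root.invoicing";
# the channel name is illustrative.
def _example_create_channel(env):
    root = env.ref("queue_job.channel_root")
    return env["queue.job.channel"].create(
        {"name": "invoicing", "parent_id": root.id}
    )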
| 36.351064 | 3,417 |
346 | py | PYTHON | 15.0 |
# Copyright 2020 Camptocamp
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
from odoo import fields, models
class IrModelFields(models.Model):
_inherit = "ir.model.fields"
ttype = fields.Selection(
selection_add=[("job_serialized", "Job Serialized")],
ondelete={"job_serialized": "cascade"},
)
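# Note (sketch, not part of the upstream file): the "job_serialized" type added
# above backs the ``JobSerialized`` field used elsewhere in this module, e.g. on
# queue.job.function:
#
#     from odoo.addons.queue_job.fields import JobSerialized
#
#     retry_pattern = JobSerialized(string="Retry Pattern (serialized)", base_type=dict)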
| 26.615385 | 346 |
9,426 | py | PYTHON | 15.0 |
# Copyright 2013-2020 Camptocamp SA
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
import ast
import logging
import re
from collections import namedtuple
from odoo import _, api, exceptions, fields, models, tools
from ..fields import JobSerialized
_logger = logging.getLogger(__name__)
regex_job_function_name = re.compile(r"^<([0-9a-z_\.]+)>\.([0-9a-zA-Z_]+)$")
class QueueJobFunction(models.Model):
_name = "queue.job.function"
_description = "Job Functions"
_log_access = False
JobConfig = namedtuple(
"JobConfig",
"channel "
"retry_pattern "
"related_action_enable "
"related_action_func_name "
"related_action_kwargs "
"job_function_id ",
)
def _default_channel(self):
return self.env.ref("queue_job.channel_root")
name = fields.Char(
compute="_compute_name",
inverse="_inverse_name",
index=True,
store=True,
)
    # model and method should be required, but the required flag wouldn't
    # give _inverse_name a chance to be executed
model_id = fields.Many2one(
comodel_name="ir.model", string="Model", ondelete="cascade"
)
method = fields.Char()
channel_id = fields.Many2one(
comodel_name="queue.job.channel",
string="Channel",
required=True,
default=lambda r: r._default_channel(),
)
channel = fields.Char(related="channel_id.complete_name", store=True, readonly=True)
retry_pattern = JobSerialized(string="Retry Pattern (serialized)", base_type=dict)
edit_retry_pattern = fields.Text(
string="Retry Pattern",
compute="_compute_edit_retry_pattern",
inverse="_inverse_edit_retry_pattern",
help="Pattern expressing from the count of retries on retryable errors,"
" the number of of seconds to postpone the next execution. Setting the "
"number of seconds to a 2-element tuple or list will randomize the "
"retry interval between the 2 values.\n"
"Example: {1: 10, 5: 20, 10: 30, 15: 300}.\n"
"Example: {1: (1, 10), 5: (11, 20), 10: (21, 30), 15: (100, 300)}.\n"
"See the module description for details.",
)
related_action = JobSerialized(string="Related Action (serialized)", base_type=dict)
edit_related_action = fields.Text(
string="Related Action",
compute="_compute_edit_related_action",
inverse="_inverse_edit_related_action",
help="The action when the button *Related Action* is used on a job. "
"The default action is to open the view of the record related "
"to the job. Configured as a dictionary with optional keys: "
"enable, func_name, kwargs.\n"
"See the module description for details.",
)
@api.depends("model_id.model", "method")
def _compute_name(self):
for record in self:
if not (record.model_id and record.method):
record.name = ""
continue
record.name = self.job_function_name(record.model_id.model, record.method)
def _inverse_name(self):
groups = regex_job_function_name.match(self.name)
if not groups:
raise exceptions.UserError(_("Invalid job function: {}").format(self.name))
model_name = groups[1]
method = groups[2]
model = (
self.env["ir.model"].sudo().search([("model", "=", model_name)], limit=1)
)
if not model:
raise exceptions.UserError(_("Model {} not found").format(model_name))
self.model_id = model.id
self.method = method
@api.depends("retry_pattern")
def _compute_edit_retry_pattern(self):
for record in self:
retry_pattern = record._parse_retry_pattern()
record.edit_retry_pattern = str(retry_pattern)
def _inverse_edit_retry_pattern(self):
try:
edited = (self.edit_retry_pattern or "").strip()
if edited:
self.retry_pattern = ast.literal_eval(edited)
else:
self.retry_pattern = {}
except (ValueError, TypeError, SyntaxError) as ex:
raise exceptions.UserError(
self._retry_pattern_format_error_message()
) from ex
@api.depends("related_action")
def _compute_edit_related_action(self):
for record in self:
record.edit_related_action = str(record.related_action)
def _inverse_edit_related_action(self):
try:
edited = (self.edit_related_action or "").strip()
if edited:
self.related_action = ast.literal_eval(edited)
else:
self.related_action = {}
except (ValueError, TypeError, SyntaxError) as ex:
raise exceptions.UserError(
self._related_action_format_error_message()
) from ex
@staticmethod
def job_function_name(model_name, method_name):
return "<{}>.{}".format(model_name, method_name)
def job_default_config(self):
return self.JobConfig(
channel="root",
retry_pattern={},
related_action_enable=True,
related_action_func_name=None,
related_action_kwargs={},
job_function_id=None,
)
def _parse_retry_pattern(self):
try:
# as json can't have integers as keys and the field is stored
# as json, convert back to int
retry_pattern = {
int(try_count): postpone_seconds
for try_count, postpone_seconds in self.retry_pattern.items()
}
except ValueError:
_logger.error(
"Invalid retry pattern for job function %s,"
" keys could not be parsed as integers, fallback"
" to the default retry pattern.",
self.name,
)
retry_pattern = {}
return retry_pattern
@tools.ormcache("name")
def job_config(self, name):
config = self.search([("name", "=", name)], limit=1)
if not config:
return self.job_default_config()
retry_pattern = config._parse_retry_pattern()
return self.JobConfig(
channel=config.channel,
retry_pattern=retry_pattern,
related_action_enable=config.related_action.get("enable", True),
related_action_func_name=config.related_action.get("func_name"),
related_action_kwargs=config.related_action.get("kwargs", {}),
job_function_id=config.id,
)
def _retry_pattern_format_error_message(self):
return _(
"Unexpected format of Retry Pattern for {}.\n"
"Example of valid format:\n"
"{{1: 300, 5: 600, 10: 1200, 15: 3000}}"
).format(self.name)
@api.constrains("retry_pattern")
def _check_retry_pattern(self):
for record in self:
retry_pattern = record.retry_pattern
if not retry_pattern:
continue
all_values = list(retry_pattern) + list(retry_pattern.values())
for value in all_values:
try:
int(value)
except ValueError as ex:
raise exceptions.UserError(
record._retry_pattern_format_error_message()
) from ex
def _related_action_format_error_message(self):
return _(
"Unexpected format of Related Action for {}.\n"
"Example of valid format:\n"
'{{"enable": True, "func_name": "related_action_foo",'
' "kwargs" {{"limit": 10}}}}'
).format(self.name)
@api.constrains("related_action")
def _check_related_action(self):
valid_keys = ("enable", "func_name", "kwargs")
for record in self:
related_action = record.related_action
if not related_action:
continue
if any(key not in valid_keys for key in related_action):
raise exceptions.UserError(
record._related_action_format_error_message()
)
@api.model_create_multi
def create(self, vals_list):
records = self.browse()
if self.env.context.get("install_mode"):
# installing a module that creates a job function: rebinds the record
# to an existing one (likely we already had the job function created by
# the @job decorator previously)
new_vals_list = []
for vals in vals_list:
name = vals.get("name")
if name:
existing = self.search([("name", "=", name)], limit=1)
if existing:
if not existing.get_metadata()[0].get("noupdate"):
existing.write(vals)
records |= existing
continue
new_vals_list.append(vals)
vals_list = new_vals_list
records |= super().create(vals_list)
self.clear_caches()
return records
def write(self, values):
res = super().write(values)
self.clear_caches()
return res
def unlink(self):
res = super().unlink()
self.clear_caches()
return res
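# Minimal configuration sketch (not part of the upstream file): creating a
# queue.job.function record for a hypothetical ``export_record`` method on
# res.partner, bound to the root channel with a retry pattern. All values are
# illustrative.
def _example_configure_job_function(env):
    return env["queue.job.function"].create(
        {
            "name": "<res.partner>.export_record",
            "channel_id": env.ref("queue_job.channel_root").id,
            # retried after 60s on the first failures, 300s from the fifth on
            "edit_retry_pattern": "{1: 60, 5: 300}",
        }
    )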
| 35.977099 | 9,426 |
10,352 | py | PYTHON | 15.0 |
# Copyright 2016 Camptocamp
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
import functools
import logging
from odoo import api, models
from ..delay import Delayable
from ..job import DelayableRecordset
_logger = logging.getLogger(__name__)
class Base(models.AbstractModel):
"""The base model, which is implicitly inherited by all models.
A new :meth:`~with_delay` method is added on all Odoo Models, allowing to
postpone the execution of a job method in an asynchronous process.
"""
_inherit = "base"
def with_delay(
self,
priority=None,
eta=None,
max_retries=None,
description=None,
channel=None,
identity_key=None,
):
"""Return a ``DelayableRecordset``
It is a shortcut for the longer form as shown below::
self.with_delay(priority=20).action_done()
# is equivalent to:
self.delayable().set(priority=20).action_done().delay()
``with_delay()`` accepts job properties which specify how the job will
be executed.
Usage with job properties::
            delayable = env['a.model'].with_delay(priority=30, eta=60*60*5)
            delayable.export_one_thing(the_thing_to_export)
            # => the job will be executed with a low priority and not before a
            # delay of 5 hours from now
When using :meth:``with_delay``, the final ``delay()`` is implicit.
See the documentation of :meth:``delayable`` for more details.
:return: instance of a DelayableRecordset
:rtype: :class:`odoo.addons.queue_job.job.DelayableRecordset`
"""
return DelayableRecordset(
self,
priority=priority,
eta=eta,
max_retries=max_retries,
description=description,
channel=channel,
identity_key=identity_key,
)
def delayable(
self,
priority=None,
eta=None,
max_retries=None,
description=None,
channel=None,
identity_key=None,
):
"""Return a ``Delayable``
The returned instance allows to enqueue any method of the recordset's
Model.
Usage::
delayable = self.env["res.users"].browse(10).delayable(priority=20)
            delayable.do_work(name="test").delay()
In this example, the ``do_work`` method will not be executed directly.
It will be executed in an asynchronous job.
Method calls on a Delayable generally return themselves, so calls can
be chained together::
            delayable.set(priority=15).do_work(name="test").delay()
The order of the calls that build the job is not relevant, beside
the call to ``delay()`` that must happen at the very end. This is
equivalent to the example above::
            delayable.do_work(name="test").set(priority=15).delay()
Very importantly, ``delay()`` must be called on the top-most parent
of a chain of jobs, so if you have this::
job1 = record1.delayable().do_work()
job2 = record2.delayable().do_work()
job1.on_done(job2)
The ``delay()`` call must be made on ``job1``, otherwise ``job2`` will
be delayed, but ``job1`` will never be. When done on ``job1``, the
``delay()`` call will traverse the graph of jobs and delay all of
them::
job1.delay()
For more details on the graph dependencies, read the documentation of
:module:`~odoo.addons.queue_job.delay`.
:param priority: Priority of the job, 0 being the higher priority.
Default is 10.
:param eta: Estimated Time of Arrival of the job. It will not be
executed before this date/time.
:param max_retries: maximum number of retries before giving up and set
the job state to 'failed'. A value of 0 means
infinite retries. Default is 5.
:param description: human description of the job. If None, description
is computed from the function doc or name
:param channel: the complete name of the channel to use to process
the function. If specified it overrides the one
defined on the function
        :param identity_key: key uniquely identifying the job, if specified
                             and a job with the same key has not yet been run,
                             the new job will not be added. It is either a
                             string or a function that takes the job as
                             argument (see :py:func:`..job.identity_exact`).
:return: instance of a Delayable
:rtype: :class:`odoo.addons.queue_job.job.Delayable`
"""
return Delayable(
self,
priority=priority,
eta=eta,
max_retries=max_retries,
description=description,
channel=channel,
identity_key=identity_key,
)
def _patch_job_auto_delay(self, method_name, context_key=None):
"""Patch a method to be automatically delayed as job method when called
This patch method has to be called in ``_register_hook`` (example
below).
When a method is patched, any call to the method will not directly
execute the method's body, but will instead enqueue a job.
When a ``context_key`` is set when calling ``_patch_job_auto_delay``,
the patched method is automatically delayed only when this key is
``True`` in the caller's context. It is advised to patch the method
with a ``context_key``, because making the automatic delay *in any
case* can produce nasty and unexpected side effects (e.g. another
module calls the method and expects it to be computed before doing
something else, expecting a result, ...).
A typical use case is when a method in a module we don't control is
called synchronously in the middle of another method, and we'd like all
the calls to this method become asynchronous.
The options of the job usually passed to ``with_delay()`` (priority,
description, identity_key, ...) can be returned in a dictionary by a
method named after the name of the method suffixed by ``_job_options``
which takes the same parameters as the initial method.
It is still possible to force synchronous execution of the method by
setting a key ``_job_force_sync`` to True in the environment context.
Example patching the "foo" method to be automatically delayed as job
(the job options method is optional):
.. code-block:: python
# original method:
def foo(self, arg1):
print("hello", arg1)
def large_method(self):
# doing a lot of things
self.foo("world)
# doing a lot of other things
def button_x(self):
self.with_context(auto_delay_foo=True).large_method()
# auto delay patch:
def foo_job_options(self, arg1):
return {
"priority": 100,
"description": "Saying hello to {}".format(arg1)
}
def _register_hook(self):
self._patch_method(
"foo",
self._patch_job_auto_delay("foo", context_key="auto_delay_foo")
)
return super()._register_hook()
The result when ``button_x`` is called, is that a new job for ``foo``
is delayed.
"""
def auto_delay_wrapper(self, *args, **kwargs):
# when no context_key is set, we delay in any case (warning, can be
# dangerous)
context_delay = self.env.context.get(context_key) if context_key else True
if (
self.env.context.get("job_uuid")
or not context_delay
or self.env.context.get("_job_force_sync")
or self.env.context.get("test_queue_job_no_delay")
):
# we are in the job execution
return auto_delay_wrapper.origin(self, *args, **kwargs)
else:
# replace the synchronous call by a job on itself
method_name = auto_delay_wrapper.origin.__name__
job_options_method = getattr(
self, "{}_job_options".format(method_name), None
)
job_options = {}
if job_options_method:
job_options.update(job_options_method(*args, **kwargs))
delayed = self.with_delay(**job_options)
return getattr(delayed, method_name)(*args, **kwargs)
origin = getattr(self, method_name)
return functools.update_wrapper(auto_delay_wrapper, origin)
@api.model
def _job_store_values(self, job):
"""Hook for manipulating job stored values.
You can define a more specific hook for a job function
by defining a method name with this pattern:
`_queue_job_store_values_${func_name}`
NOTE: values will be stored only if they match stored fields on `queue.job`.
:param job: current queue_job.job.Job instance.
:return: dictionary for setting job values.
"""
return {}
@api.model
def _job_prepare_context_before_enqueue_keys(self):
"""Keys to keep in context of stored jobs
Empty by default for backward compatibility.
"""
# TODO: when migrating to 16.0, active the base context keys:
# return ("tz", "lang", "allowed_company_ids", "force_company", "active_test")
return ()
def _job_prepare_context_before_enqueue(self):
"""Return the context to store in the jobs
Can be used to keep only safe keys.
"""
return {
key: value
for key, value in self.env.context.items()
if key in self._job_prepare_context_before_enqueue_keys()
}
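# Minimal usage sketch (not part of the upstream file): chaining two jobs with
# the ``delayable`` API documented above. The model and method names are
# illustrative; ``delay()`` is called once, on the first job of the chain.
def _example_chain_jobs(env):
    records = env["res.partner"].search([], limit=10)
    job1 = records.delayable(priority=15, description="step 1").do_work_a()
    job2 = records.delayable(description="step 2").do_work_b()
    # job2 will only be enqueued once job1 is done
    job1.on_done(job2)
    job1.delay()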
| 38.058824 | 10,352 |
18,946 | py | PYTHON | 15.0 |
# Copyright (c) 2015-2016 ACSONE SA/NV (<http://acsone.eu>)
# Copyright 2015-2016 Camptocamp SA
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
"""
What is the job runner?
-----------------------
The job runner is the main process managing the dispatch of delayed jobs to
available Odoo workers
How does it work?
-----------------
* It starts as a thread in the Odoo main process or as a new worker
* It receives postgres NOTIFY messages each time jobs are
added or updated in the queue_job table.
* It maintains an in-memory priority queue of jobs that
is populated from the queue_job tables in all databases.
* It does not run jobs itself, but asks Odoo to run them through an
anonymous ``/queue_job/runjob`` HTTP request. [1]_
How to use it?
--------------
* Optionally adjust your configuration through environment variables:
- ``ODOO_QUEUE_JOB_CHANNELS=root:4`` (or any other channels
configuration), default ``root:1``.
- ``ODOO_QUEUE_JOB_SCHEME=https``, default ``http``.
- ``ODOO_QUEUE_JOB_HOST=load-balancer``, default ``http_interface``
or ``localhost`` if unset.
- ``ODOO_QUEUE_JOB_PORT=443``, default ``http_port`` or 8069 if unset.
- ``ODOO_QUEUE_JOB_HTTP_AUTH_USER=jobrunner``, default empty.
- ``ODOO_QUEUE_JOB_HTTP_AUTH_PASSWORD=s3cr3t``, default empty.
- ``ODOO_QUEUE_JOB_JOBRUNNER_DB_HOST=master-db``, default ``db_host``
or ``False`` if unset.
- ``ODOO_QUEUE_JOB_JOBRUNNER_DB_PORT=5432``, default ``db_port``
or ``False`` if unset.
- ``ODOO_QUEUE_JOB_JOBRUNNER_DB_USER=userdb``, default ``db_user``
or ``False`` if unset.
- ``ODOO_QUEUE_JOB_JOBRUNNER_DB_PASSWORD=passdb``, default ``db_password``
or ``False`` if unset.
* Alternatively, configure the channels through the Odoo configuration
file, like:
.. code-block:: ini
[queue_job]
channels = root:4
scheme = https
host = load-balancer
port = 443
http_auth_user = jobrunner
http_auth_password = s3cr3t
jobrunner_db_host = master-db
jobrunner_db_port = 5432
jobrunner_db_user = userdb
jobrunner_db_password = passdb
* Or, if using ``anybox.recipe.odoo``, add this to your buildout configuration:
.. code-block:: ini
[odoo]
recipe = anybox.recipe.odoo
(...)
queue_job.channels = root:4
queue_job.scheme = https
queue_job.host = load-balancer
queue_job.port = 443
queue_job.http_auth_user = jobrunner
queue_job.http_auth_password = s3cr3t
* Start Odoo with ``--load=web,web_kanban,queue_job``
and ``--workers`` greater than 1 [2]_, or set the ``server_wide_modules``
  option in the Odoo configuration file:
.. code-block:: ini
[options]
(...)
workers = 4
server_wide_modules = web,web_kanban,queue_job
(...)
* Or, if using ``anybox.recipe.odoo``:
.. code-block:: ini
[odoo]
recipe = anybox.recipe.odoo
(...)
options.workers = 4
options.server_wide_modules = web,web_kanban,queue_job
* Confirm the runner is starting correctly by checking the odoo log file:
.. code-block:: none
...INFO...queue_job.jobrunner.runner: starting
...INFO...queue_job.jobrunner.runner: initializing database connections
...INFO...queue_job.jobrunner.runner: queue job runner ready for db <dbname>
...INFO...queue_job.jobrunner.runner: database connections ready
* Create jobs (eg using base_import_async) and observe they
start immediately and in parallel.
* Tip: to enable debug logging for the queue job, use
``--log-handler=odoo.addons.queue_job:DEBUG``
Caveat
------
* After creating a new database or installing queue_job on an
existing database, Odoo must be restarted for the runner to detect it.
* When Odoo shuts down normally, it waits for running jobs to finish.
However, when the Odoo server crashes or is otherwise force-stopped,
running jobs are interrupted while the runner has no chance to know
they have been aborted. In such situations, jobs may remain in
``started`` or ``enqueued`` state after the Odoo server is halted.
Since the runner has no way to know if they are actually running or
not, and does not know for sure if it is safe to restart the jobs,
it does not attempt to restart them automatically. Such stale jobs
  therefore fill the running queue and prevent other jobs from starting.
You must therefore requeue them manually, either from the Jobs view,
or by running the following SQL statement *before starting Odoo*:
.. code-block:: sql
update queue_job set state='pending' where state in ('started', 'enqueued')
.. rubric:: Footnotes
.. [1] From a security standpoint, it is safe to have an anonymous HTTP
   request because this request will only run jobs that are already
   enqueued.
.. [2] It works with the threaded Odoo server too, although this way
of running Odoo is obviously not for production purposes.
"""
import datetime
import logging
import os
import selectors
import threading
import time
from contextlib import closing, contextmanager
import psycopg2
import requests
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import odoo
from odoo.tools import config
from . import queue_job_config
from .channels import ENQUEUED, NOT_DONE, PENDING, ChannelManager
SELECT_TIMEOUT = 60
ERROR_RECOVERY_DELAY = 5
_logger = logging.getLogger(__name__)
select = selectors.DefaultSelector
# Unfortunately, it is not possible to extend the Odoo
# server command line arguments, so we resort to environment variables
# to configure the runner (channels mostly).
#
# On the other hand, the odoo configuration file can be extended at will,
# so we check it in addition to the environment variables.
def _channels():
return (
os.environ.get("ODOO_QUEUE_JOB_CHANNELS")
or queue_job_config.get("channels")
or "root:1"
)
def _datetime_to_epoch(dt):
# important: this must return the same as postgresql
# EXTRACT(EPOCH FROM TIMESTAMP dt)
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
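# e.g. _datetime_to_epoch(datetime.datetime(1970, 1, 2)) == 86400.0, matching
# PostgreSQL's EXTRACT(EPOCH FROM TIMESTAMP '1970-01-02 00:00:00').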
def _odoo_now():
dt = datetime.datetime.utcnow()
return _datetime_to_epoch(dt)
def _connection_info_for(db_name):
db_or_uri, connection_info = odoo.sql_db.connection_info_for(db_name)
for p in ("host", "port", "user", "password"):
cfg = os.environ.get(
"ODOO_QUEUE_JOB_JOBRUNNER_DB_%s" % p.upper()
) or queue_job_config.get("jobrunner_db_" + p)
if cfg:
connection_info[p] = cfg
return connection_info
def _async_http_get(scheme, host, port, user, password, db_name, job_uuid):
# Method to set failed job (due to timeout, etc) as pending,
# to avoid keeping it as enqueued.
def set_job_pending():
connection_info = _connection_info_for(db_name)
conn = psycopg2.connect(**connection_info)
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with closing(conn.cursor()) as cr:
cr.execute(
"UPDATE queue_job SET state=%s, "
"date_enqueued=NULL, date_started=NULL "
"WHERE uuid=%s and state=%s "
"RETURNING uuid",
(PENDING, job_uuid, ENQUEUED),
)
if cr.fetchone():
_logger.warning(
"state of job %s was reset from %s to %s",
job_uuid,
ENQUEUED,
PENDING,
)
# TODO: better way to HTTP GET asynchronously (grequest, ...)?
# if this was python3 I would be doing this with
# asyncio, aiohttp and aiopg
def urlopen():
url = "{}://{}:{}/queue_job/runjob?db={}&job_uuid={}".format(
scheme, host, port, db_name, job_uuid
)
try:
auth = None
if user:
auth = (user, password)
# we are not interested in the result, so we set a short timeout
# but not too short so we trap and log hard configuration errors
response = requests.get(url, timeout=1, auth=auth)
# raise_for_status will result in either nothing, a Client Error
# for HTTP Response codes between 400 and 500 or a Server Error
# for codes between 500 and 600
response.raise_for_status()
except requests.Timeout:
set_job_pending()
except Exception:
_logger.exception("exception in GET %s", url)
set_job_pending()
thread = threading.Thread(target=urlopen)
thread.daemon = True
thread.start()
class Database(object):
def __init__(self, db_name):
self.db_name = db_name
connection_info = _connection_info_for(db_name)
self.conn = psycopg2.connect(**connection_info)
self.conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
self.has_queue_job = self._has_queue_job()
if self.has_queue_job:
self._initialize()
def close(self):
# pylint: disable=except-pass
        # if close fails for any reason, it's either because the connection is
        # already closed (and we don't care), or for some other reason, in which
        # case it will be closed on del anyway
try:
self.conn.close()
except Exception:
pass
self.conn = None
def _has_queue_job(self):
with closing(self.conn.cursor()) as cr:
cr.execute(
"SELECT 1 FROM pg_tables WHERE tablename=%s", ("ir_module_module",)
)
if not cr.fetchone():
_logger.debug("%s doesn't seem to be an odoo db", self.db_name)
return False
cr.execute(
"SELECT 1 FROM ir_module_module WHERE name=%s AND state=%s",
("queue_job", "installed"),
)
if not cr.fetchone():
_logger.debug("queue_job is not installed for db %s", self.db_name)
return False
cr.execute(
"""SELECT COUNT(1)
FROM information_schema.triggers
WHERE event_object_table = %s
AND trigger_name = %s""",
("queue_job", "queue_job_notify"),
)
if cr.fetchone()[0] != 3: # INSERT, DELETE, UPDATE
_logger.error(
"queue_job_notify trigger is missing in db %s", self.db_name
)
return False
return True
def _initialize(self):
with closing(self.conn.cursor()) as cr:
cr.execute("LISTEN queue_job")
@contextmanager
def select_jobs(self, where, args):
# pylint: disable=sql-injection
# the checker thinks we are injecting values but we are not, we are
# adding the where conditions, values are added later properly with
# parameters
query = (
"SELECT channel, uuid, id as seq, date_created, "
"priority, EXTRACT(EPOCH FROM eta), state "
"FROM queue_job WHERE %s" % (where,)
)
with closing(self.conn.cursor("select_jobs", withhold=True)) as cr:
cr.execute(query, args)
yield cr
def keep_alive(self):
query = "SELECT 1"
with closing(self.conn.cursor()) as cr:
cr.execute(query)
def set_job_enqueued(self, uuid):
with closing(self.conn.cursor()) as cr:
cr.execute(
"UPDATE queue_job SET state=%s, "
"date_enqueued=date_trunc('seconds', "
" now() at time zone 'utc') "
"WHERE uuid=%s",
(ENQUEUED, uuid),
)
class QueueJobRunner(object):
def __init__(
self,
scheme="http",
host="localhost",
port=8069,
user=None,
password=None,
channel_config_string=None,
):
self.scheme = scheme
self.host = host
self.port = port
self.user = user
self.password = password
self.channel_manager = ChannelManager()
if channel_config_string is None:
channel_config_string = _channels()
self.channel_manager.simple_configure(channel_config_string)
self.db_by_name = {}
self._stop = False
self._stop_pipe = os.pipe()
@classmethod
def from_environ_or_config(cls):
scheme = os.environ.get("ODOO_QUEUE_JOB_SCHEME") or queue_job_config.get(
"scheme"
)
host = (
os.environ.get("ODOO_QUEUE_JOB_HOST")
or queue_job_config.get("host")
or config["http_interface"]
)
port = (
os.environ.get("ODOO_QUEUE_JOB_PORT")
or queue_job_config.get("port")
or config["http_port"]
)
user = os.environ.get("ODOO_QUEUE_JOB_HTTP_AUTH_USER") or queue_job_config.get(
"http_auth_user"
)
password = os.environ.get(
"ODOO_QUEUE_JOB_HTTP_AUTH_PASSWORD"
) or queue_job_config.get("http_auth_password")
runner = cls(
scheme=scheme or "http",
host=host or "localhost",
port=port or 8069,
user=user,
password=password,
)
return runner
def get_db_names(self):
if config["db_name"]:
db_names = config["db_name"].split(",")
else:
db_names = odoo.service.db.exp_list(True)
return db_names
def close_databases(self, remove_jobs=True):
for db_name, db in self.db_by_name.items():
try:
if remove_jobs:
self.channel_manager.remove_db(db_name)
db.close()
except Exception:
_logger.warning("error closing database %s", db_name, exc_info=True)
self.db_by_name = {}
def initialize_databases(self):
for db_name in self.get_db_names():
db = Database(db_name)
if db.has_queue_job:
self.db_by_name[db_name] = db
with db.select_jobs("state in %s", (NOT_DONE,)) as cr:
for job_data in cr:
self.channel_manager.notify(db_name, *job_data)
_logger.info("queue job runner ready for db %s", db_name)
def run_jobs(self):
now = _odoo_now()
for job in self.channel_manager.get_jobs_to_run(now):
if self._stop:
break
_logger.info("asking Odoo to run job %s on db %s", job.uuid, job.db_name)
self.db_by_name[job.db_name].set_job_enqueued(job.uuid)
_async_http_get(
self.scheme,
self.host,
self.port,
self.user,
self.password,
job.db_name,
job.uuid,
)
def process_notifications(self):
for db in self.db_by_name.values():
if not db.conn.notifies:
                # If there is no activity in the queue_job table, it seems that
                # tcp keepalives are not sent (in that very specific scenario),
                # causing some intermediaries (such as haproxy) to close the
                # connection, making the jobrunner restart on a socket error
db.keep_alive()
while db.conn.notifies:
if self._stop:
break
notification = db.conn.notifies.pop()
uuid = notification.payload
with db.select_jobs("uuid = %s", (uuid,)) as cr:
job_datas = cr.fetchone()
if job_datas:
self.channel_manager.notify(db.db_name, *job_datas)
else:
self.channel_manager.remove_job(uuid)
def wait_notification(self):
for db in self.db_by_name.values():
if db.conn.notifies:
# something is going on in the queue, no need to wait
return
# wait for something to happen in the queue_job tables
# we'll select() on database connections and the stop pipe
conns = [db.conn for db in self.db_by_name.values()]
conns.append(self._stop_pipe[0])
# look if the channels specify a wakeup time
wakeup_time = self.channel_manager.get_wakeup_time()
if not wakeup_time:
# this could very well be no timeout at all, because
# any activity in the job queue will wake us up, but
# let's have a timeout anyway, just to be safe
timeout = SELECT_TIMEOUT
else:
timeout = wakeup_time - _odoo_now()
# wait for a notification or a timeout;
# if timeout is negative (ie wakeup time in the past),
# do not wait; this should rarely happen
# because of how get_wakeup_time is designed; actually
# if timeout remains a large negative number, it is most
# probably a bug
_logger.debug("select() timeout: %.2f sec", timeout)
if timeout > 0:
if conns and not self._stop:
with select() as sel:
for conn in conns:
sel.register(conn, selectors.EVENT_READ)
events = sel.select(timeout=timeout)
for key, _mask in events:
if key.fileobj == self._stop_pipe[0]:
# stop-pipe is not a conn so doesn't need poll()
continue
key.fileobj.poll()
def stop(self):
_logger.info("graceful stop requested")
self._stop = True
# wakeup the select() in wait_notification
os.write(self._stop_pipe[1], b".")
def run(self):
_logger.info("starting")
while not self._stop:
# outer loop does exception recovery
try:
_logger.info("initializing database connections")
# TODO: how to detect new databases or databases
# on which queue_job is installed after server start?
self.initialize_databases()
_logger.info("database connections ready")
# inner loop does the normal processing
while not self._stop:
self.process_notifications()
self.run_jobs()
self.wait_notification()
except KeyboardInterrupt:
self.stop()
except InterruptedError:
# Interrupted system call, i.e. KeyboardInterrupt during select
self.stop()
except Exception:
_logger.exception(
"exception: sleeping %ds and retrying", ERROR_RECOVERY_DELAY
)
self.close_databases()
time.sleep(ERROR_RECOVERY_DELAY)
self.close_databases(remove_jobs=False)
_logger.info("stopped")
| 35.479401 | 18,946 |
36,655 | py | PYTHON | 15.0 |
# Copyright (c) 2015-2016 ACSONE SA/NV (<http://acsone.eu>)
# Copyright 2015-2016 Camptocamp SA
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
import logging
from functools import total_ordering
from heapq import heappop, heappush
from weakref import WeakValueDictionary
from ..exception import ChannelNotFound
from ..job import CANCELLED, DONE, ENQUEUED, FAILED, PENDING, STARTED, WAIT_DEPENDENCIES
NOT_DONE = (WAIT_DEPENDENCIES, PENDING, ENQUEUED, STARTED, FAILED)
_logger = logging.getLogger(__name__)
class PriorityQueue(object):
"""A priority queue that supports removing arbitrary objects.
Adding an object already in the queue is a no op.
Popping an empty queue returns None.
>>> q = PriorityQueue()
>>> q.add(2)
>>> q.add(3)
>>> q.add(3)
>>> q.add(1)
>>> q[0]
1
>>> len(q)
3
>>> q.pop()
1
>>> q.remove(2)
>>> len(q)
1
>>> q[0]
3
>>> q.pop()
3
>>> q.pop()
>>> q.add(2)
>>> q.remove(2)
>>> q.add(2)
>>> q.pop()
2
"""
def __init__(self):
self._heap = []
self._known = set() # all objects in the heap (including removed)
self._removed = set() # all objects that have been removed
def __len__(self):
return len(self._known) - len(self._removed)
def __getitem__(self, i):
if i != 0:
raise IndexError()
while True:
if not self._heap:
raise IndexError()
o = self._heap[0]
if o in self._removed:
o2 = heappop(self._heap)
assert o2 == o
self._removed.remove(o)
self._known.remove(o)
else:
return o
def __contains__(self, o):
return o in self._known and o not in self._removed
def add(self, o):
if o is None:
raise ValueError()
if o in self._removed:
self._removed.remove(o)
if o in self._known:
return
self._known.add(o)
heappush(self._heap, o)
def remove(self, o):
if o is None:
raise ValueError()
if o not in self._known:
return
if o not in self._removed:
self._removed.add(o)
def pop(self):
while True:
try:
o = heappop(self._heap)
except IndexError:
# queue is empty
return None
self._known.remove(o)
if o in self._removed:
self._removed.remove(o)
else:
return o
class SafeSet(set):
"""A set that does not raise KeyError when removing non-existent items.
>>> s = SafeSet()
>>> s.remove(1)
>>> len(s)
0
>>> s.remove(1)
"""
def remove(self, o):
# pylint: disable=missing-return,except-pass
try:
super().remove(o)
except KeyError:
pass
@total_ordering
class ChannelJob(object):
"""A channel job is attached to a channel and holds the properties of a
job that are necessary to prioritise them.
Channel jobs are comparable according to the following rules:
* jobs with an eta come before all other jobs
* then jobs with a smaller eta come first
* then jobs with a smaller priority come first
* then jobs with a smaller creation time come first
* then jobs with a smaller sequence come first
Here are some examples.
j1 comes before j2 because it has an earlier date_created
>>> j1 = ChannelJob(None, None, 1,
... seq=0, date_created=1, priority=9, eta=None)
>>> j1
<ChannelJob 1>
>>> j2 = ChannelJob(None, None, 2,
... seq=0, date_created=2, priority=9, eta=None)
>>> j1 < j2
True
j3 comes first because it has lower priority,
despite having a creation date after j1 and j2
>>> j3 = ChannelJob(None, None, 3,
... seq=0, date_created=3, priority=2, eta=None)
>>> j3 < j1
True
    j4 and j5 come even before j3, because they have an eta
>>> j4 = ChannelJob(None, None, 4,
... seq=0, date_created=4, priority=9, eta=9)
>>> j5 = ChannelJob(None, None, 5,
... seq=0, date_created=5, priority=9, eta=9)
>>> j4 < j5 < j3
True
    j6 has the same date_created and priority as j5 but a smaller eta
>>> j6 = ChannelJob(None, None, 6,
... seq=0, date_created=5, priority=9, eta=2)
>>> j6 < j4 < j5
True
Here is the complete suite:
>>> j6 < j4 < j5 < j3 < j1 < j2
True
j0 has the same properties as j1 but they are not considered
equal as they are different instances
>>> j0 = ChannelJob(None, None, 1,
... seq=0, date_created=1, priority=9, eta=None)
>>> j0 == j1
False
>>> j0 == j0
True
Comparison excluding eta:
>>> j1.sorting_key_ignoring_eta() < j2.sorting_key_ignoring_eta()
True
"""
def __init__(self, db_name, channel, uuid, seq, date_created, priority, eta):
self.db_name = db_name
self.channel = channel
self.uuid = uuid
self.seq = seq
self.date_created = date_created
self.priority = priority
self.eta = eta
def __repr__(self):
return "<ChannelJob %s>" % self.uuid
def __eq__(self, other):
return id(self) == id(other)
def __hash__(self):
return id(self)
def sorting_key(self):
return self.eta, self.priority, self.date_created, self.seq
def sorting_key_ignoring_eta(self):
return self.priority, self.date_created, self.seq
def __lt__(self, other):
if self.eta and not other.eta:
return True
elif not self.eta and other.eta:
return False
return self.sorting_key() < other.sorting_key()
class ChannelQueue(object):
"""A channel queue is a priority queue for jobs.
Jobs with an eta are set aside until their eta is past due, at
which point they start competing normally with other jobs.
>>> q = ChannelQueue()
>>> j1 = ChannelJob(None, None, 1,
... seq=0, date_created=1, priority=1, eta=10)
>>> j2 = ChannelJob(None, None, 2,
... seq=0, date_created=2, priority=1, eta=None)
>>> j3 = ChannelJob(None, None, 3,
... seq=0, date_created=3, priority=1, eta=None)
>>> q.add(j1)
>>> q.add(j2)
>>> q.add(j3)
Wakeup time is the eta of job 1.
>>> q.get_wakeup_time()
10
We have not reached the eta of job 1, so we get job 2.
>>> q.pop(now=1)
<ChannelJob 2>
Wakeup time is still the eta of job 1, and we get job 1 when we are past
    its eta.
>>> q.get_wakeup_time()
10
>>> q.pop(now=11)
<ChannelJob 1>
    Now there is no wakeup time anymore, because no job has an eta.
>>> q.get_wakeup_time()
0
>>> q.pop(now=12)
<ChannelJob 3>
>>> q.get_wakeup_time()
0
>>> q.pop(now=13)
    Observe that a job with a past eta still runs after jobs with higher priority.
>>> j4 = ChannelJob(None, None, 4,
... seq=0, date_created=4, priority=10, eta=20)
>>> j5 = ChannelJob(None, None, 5,
... seq=0, date_created=5, priority=1, eta=None)
>>> q.add(j4)
>>> q.add(j5)
>>> q.get_wakeup_time()
20
>>> q.pop(21)
<ChannelJob 5>
>>> q.get_wakeup_time()
0
>>> q.pop(22)
<ChannelJob 4>
Test a sequential queue.
>>> sq = ChannelQueue(sequential=True)
>>> j6 = ChannelJob(None, None, 6,
... seq=0, date_created=6, priority=1, eta=None)
>>> j7 = ChannelJob(None, None, 7,
... seq=0, date_created=7, priority=1, eta=20)
>>> j8 = ChannelJob(None, None, 8,
... seq=0, date_created=8, priority=1, eta=None)
>>> sq.add(j6)
>>> sq.add(j7)
>>> sq.add(j8)
>>> sq.pop(10)
<ChannelJob 6>
>>> sq.pop(15)
>>> sq.pop(20)
<ChannelJob 7>
>>> sq.pop(30)
<ChannelJob 8>
"""
def __init__(self, sequential=False):
self._queue = PriorityQueue()
self._eta_queue = PriorityQueue()
self.sequential = sequential
def __len__(self):
return len(self._eta_queue) + len(self._queue)
def __contains__(self, o):
return o in self._eta_queue or o in self._queue
def add(self, job):
if job.eta:
self._eta_queue.add(job)
else:
self._queue.add(job)
def remove(self, job):
self._eta_queue.remove(job)
self._queue.remove(job)
def pop(self, now):
while self._eta_queue and self._eta_queue[0].eta <= now:
eta_job = self._eta_queue.pop()
eta_job.eta = None
self._queue.add(eta_job)
if self.sequential and self._eta_queue and self._queue:
eta_job = self._eta_queue[0]
job = self._queue[0]
if eta_job.sorting_key_ignoring_eta() < job.sorting_key_ignoring_eta():
# eta ignored, the job with eta has higher priority
# than the job without eta; since it's a sequential
# queue we wait until eta
return None
return self._queue.pop()
def get_wakeup_time(self, wakeup_time=0):
if self._eta_queue:
if not wakeup_time:
wakeup_time = self._eta_queue[0].eta
else:
wakeup_time = min(wakeup_time, self._eta_queue[0].eta)
return wakeup_time
class Channel(object):
"""A channel for jobs, with a maximum capacity.
When jobs are created by queue_job modules, they may be associated
to a job channel. Jobs with no channel are inserted into the root channel.
Job channels are joined in a hierarchy down to the root channel.
When a job channel has available capacity, jobs are dequeued, marked
as running in the channel and are inserted into the queue of the
parent channel where they wait for available capacity and so on.
Job channels can be visualized as water channels with a given flow
limit (= capacity). Channels are joined together in a downstream channel
and the flow limit of the downstream channel limits upstream channels.::
---------------------+
|
|
Ch. A C:4,Q:12,R:4 +-----------------------
---------------------+ Ch. root C:5,Q:0,R:4
|
---------------------+
Ch. B C:1,Q:0,R:0
---------------------+-----------------------
The above diagram illustrates two channels joining in the root channel.
The root channel has a capacity of 5, and 4 running jobs coming from
Channel A. Channel A has a capacity of 4, all in use (passed down to the
root channel), and 12 jobs enqueued. Channel B has a capacity of 1,
none in use. This means that whenever a new job comes in channel B,
there will be available room for it to run in the root channel.
Note that from the point of view of a channel, 'running' means enqueued
in the downstream channel. Only jobs marked running in the root channel
are actually sent to Odoo for execution.
Should a downstream channel have less capacity than its upstream channels,
jobs going downstream will be enqueued in the downstream channel,
and compete normally according to their properties (priority, etc).
Using this technique, it is possible to enforce sequence in a channel
with a capacity of 1. It is also possible to dedicate a channel with a
limited capacity for application-autocreated subchannels
    without the risk of overflowing the system.
"""
def __init__(self, name, parent, capacity=None, sequential=False, throttle=0):
self.name = name
self.parent = parent
if self.parent:
self.parent.children[name] = self
self.children = {}
self._queue = ChannelQueue()
self._running = SafeSet()
self._failed = SafeSet()
self._pause_until = 0 # utc seconds since the epoch
self.capacity = capacity
self.throttle = throttle # seconds
self.sequential = sequential
@property
def sequential(self):
return self._queue.sequential
@sequential.setter
def sequential(self, val):
self._queue.sequential = val
def configure(self, config):
"""Configure a channel from a dictionary.
Supported keys are:
* capacity
* sequential
* throttle
"""
assert self.fullname.endswith(config["name"])
self.capacity = config.get("capacity", None)
self.sequential = bool(config.get("sequential", False))
self.throttle = int(config.get("throttle", 0))
if self.sequential and self.capacity != 1:
raise ValueError("A sequential channel must have a capacity of 1")
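    # e.g. for a channel whose fullname is "root.sub" (illustrative values):
    #   channel.configure({"name": "sub", "capacity": 2, "throttle": 3})
    # a sequential channel must be configured with a capacity of 1:
    #   channel.configure({"name": "sub", "capacity": 1, "sequential": True})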
@property
def fullname(self):
"""The full name of the channel, in dot separated notation."""
if self.parent:
return self.parent.fullname + "." + self.name
else:
return self.name
def get_subchannel_by_name(self, subchannel_name):
return self.children.get(subchannel_name)
def __str__(self):
capacity = "∞" if self.capacity is None else str(self.capacity)
return "%s(C:%s,Q:%d,R:%d,F:%d)" % (
self.fullname,
capacity,
len(self._queue),
len(self._running),
len(self._failed),
)
def remove(self, job):
"""Remove a job from the channel."""
self._queue.remove(job)
self._running.remove(job)
self._failed.remove(job)
if self.parent:
self.parent.remove(job)
def set_done(self, job):
"""Mark a job as done.
This removes it from the channel queue.
"""
self.remove(job)
_logger.debug("job %s marked done in channel %s", job.uuid, self)
def set_pending(self, job):
"""Mark a job as pending.
This puts the job in the channel queue and remove it
from parent channels queues.
"""
if job not in self._queue:
self._queue.add(job)
self._running.remove(job)
self._failed.remove(job)
if self.parent:
self.parent.remove(job)
_logger.debug("job %s marked pending in channel %s", job.uuid, self)
def set_running(self, job):
"""Mark a job as running.
This also marks the job as running in parent channels.
"""
if job not in self._running:
self._queue.remove(job)
self._running.add(job)
self._failed.remove(job)
if self.parent:
self.parent.set_running(job)
_logger.debug("job %s marked running in channel %s", job.uuid, self)
def set_failed(self, job):
"""Mark the job as failed."""
if job not in self._failed:
self._queue.remove(job)
self._running.remove(job)
self._failed.add(job)
if self.parent:
self.parent.remove(job)
_logger.debug("job %s marked failed in channel %s", job.uuid, self)
def has_capacity(self):
if self.sequential and self._failed:
# a sequential queue blocks on failed jobs
return False
if not self.capacity:
# unlimited capacity
return True
return len(self._running) < self.capacity
def get_jobs_to_run(self, now):
"""Get jobs that are ready to run in channel.
This works by enqueuing jobs that are ready to run in children
channels, then yielding jobs from the channel queue until
``capacity`` jobs are marked running in the channel.
If the ``throttle`` option is set on the channel, then it yields
no job until at least throttle seconds have elapsed since the previous
yield.
:param now: the current datetime in seconds
:return: iterator of
:class:`odoo.addons.queue_job.jobrunner.ChannelJob`
"""
# enqueue jobs of children channels
for child in self.children.values():
for job in child.get_jobs_to_run(now):
self._queue.add(job)
# is this channel paused?
if self.throttle and self._pause_until:
if now < self._pause_until:
if self.has_capacity():
_logger.debug(
"channel %s paused until %s because "
"of throttle delay between jobs",
self,
self._pause_until,
)
return
else:
# unpause, this is important to avoid perpetual wakeup
# while the channel is at full capacity
self._pause_until = 0
_logger.debug("channel %s unpaused at %s", self, now)
# yield jobs that are ready to run, while we have capacity
while self.has_capacity():
job = self._queue.pop(now)
if not job:
return
self._running.add(job)
_logger.debug("job %s marked running in channel %s", job.uuid, self)
yield job
if self.throttle:
self._pause_until = now + self.throttle
_logger.debug("pausing channel %s until %s", self, self._pause_until)
return
def get_wakeup_time(self, wakeup_time=0):
if not self.has_capacity():
# this channel is full, do not request timed wakeup, as
# a notification will wakeup the runner when a job finishes
return wakeup_time
if self._pause_until:
# this channel is paused, request wakeup at the end of the pause
if not wakeup_time:
wakeup_time = self._pause_until
else:
wakeup_time = min(wakeup_time, self._pause_until)
# since this channel is paused, no need to look at the
# wakeup time of children nor eta jobs, as such jobs would not
# run anyway because they would end up in this paused channel
return wakeup_time
wakeup_time = self._queue.get_wakeup_time(wakeup_time)
for child in self.children.values():
wakeup_time = child.get_wakeup_time(wakeup_time)
return wakeup_time
def split_strip(s, sep, maxsplit=-1):
"""Split string and strip each component.
>>> split_strip("foo: bar baz\\n: fred:", ":")
['foo', 'bar baz', 'fred', '']
"""
return [x.strip() for x in s.split(sep, maxsplit)]
class ChannelManager(object):
"""High level interface for channels
This class handles:
* configuration of channels
* high level api to create and remove jobs (notify, remove_job, remove_db)
* get jobs to run
Here is how the runner will use it.
Let's create a channel manager and configure it.
>>> from pprint import pprint as pp
>>> cm = ChannelManager()
>>> cm.simple_configure('root:4,A:4,B:1')
>>> db = 'db'
Add a few jobs in channel A with priority 10
>>> cm.notify(db, 'A', 'A1', 1, 0, 10, None, 'pending')
>>> cm.notify(db, 'A', 'A2', 2, 0, 10, None, 'pending')
>>> cm.notify(db, 'A', 'A3', 3, 0, 10, None, 'pending')
>>> cm.notify(db, 'A', 'A4', 4, 0, 10, None, 'pending')
>>> cm.notify(db, 'A', 'A5', 5, 0, 10, None, 'pending')
>>> cm.notify(db, 'A', 'A6', 6, 0, 10, None, 'pending')
Add a few jobs in channel B with priority 5
>>> cm.notify(db, 'B', 'B1', 1, 0, 5, None, 'pending')
>>> cm.notify(db, 'B', 'B2', 2, 0, 5, None, 'pending')
We must now run one job from queue B which has a capacity of 1
and 3 jobs from queue A so the root channel capacity of 4 is filled.
>>> pp(list(cm.get_jobs_to_run(now=100)))
[<ChannelJob B1>, <ChannelJob A1>, <ChannelJob A2>, <ChannelJob A3>]
    Job A2 is done. Next job to run is A4, even though we have a
    higher priority job in channel B, because channel B has a capacity of 1.
>>> cm.notify(db, 'A', 'A2', 2, 0, 10, None, 'done')
>>> pp(list(cm.get_jobs_to_run(now=100)))
[<ChannelJob A4>]
Job B1 is done. Next job to run is B2 because it has higher priority.
>>> cm.notify(db, 'B', 'B1', 1, 0, 5, None, 'done')
>>> pp(list(cm.get_jobs_to_run(now=100)))
[<ChannelJob B2>]
Let's say A1 is done and A6 gets a higher priority. A6 will run next.
>>> cm.notify(db, 'A', 'A1', 1, 0, 10, None, 'done')
>>> cm.notify(db, 'A', 'A6', 6, 0, 5, None, 'pending')
>>> pp(list(cm.get_jobs_to_run(now=100)))
[<ChannelJob A6>]
    Let's test the throttling mechanism. Configure a 2-second delay
    on channel A, and enqueue two jobs.
>>> cm = ChannelManager()
>>> cm.simple_configure('root:4,A:4:throttle=2')
>>> cm.notify(db, 'A', 'A1', 1, 0, 10, None, 'pending')
>>> cm.notify(db, 'A', 'A2', 2, 0, 10, None, 'pending')
We have only one job to run, because of the throttle.
>>> pp(list(cm.get_jobs_to_run(now=100)))
[<ChannelJob A1>]
>>> cm.get_wakeup_time()
102
We have no job to run, because of the throttle.
>>> pp(list(cm.get_jobs_to_run(now=101)))
[]
>>> cm.get_wakeup_time()
102
2 seconds later, we can run the other job (even though the first one
is still running, because we have enough capacity).
>>> pp(list(cm.get_jobs_to_run(now=102)))
[<ChannelJob A2>]
>>> cm.get_wakeup_time()
104
Let's test throttling in combination with a queue reaching full capacity.
>>> cm = ChannelManager()
>>> cm.simple_configure('root:4,T:2:throttle=2')
>>> cm.notify(db, 'T', 'T1', 1, 0, 10, None, 'pending')
>>> cm.notify(db, 'T', 'T2', 2, 0, 10, None, 'pending')
>>> cm.notify(db, 'T', 'T3', 3, 0, 10, None, 'pending')
>>> pp(list(cm.get_jobs_to_run(now=100)))
[<ChannelJob T1>]
>>> pp(list(cm.get_jobs_to_run(now=102)))
[<ChannelJob T2>]
Channel is now full, so no job to run even though throttling
delay is over.
>>> pp(list(cm.get_jobs_to_run(now=103)))
[]
>>> cm.get_wakeup_time() # no wakeup time, since queue is full
0
>>> pp(list(cm.get_jobs_to_run(now=104)))
[]
>>> cm.get_wakeup_time() # queue is still full
0
>>> cm.notify(db, 'T', 'T1', 1, 0, 10, None, 'done')
>>> pp(list(cm.get_jobs_to_run(now=105)))
[<ChannelJob T3>]
>>> cm.get_wakeup_time() # queue is full
0
>>> cm.notify(db, 'T', 'T2', 1, 0, 10, None, 'done')
>>> cm.get_wakeup_time()
107
Test wakeup time behaviour in presence of eta.
>>> cm = ChannelManager()
>>> cm.simple_configure('root:4,E:1')
>>> cm.notify(db, 'E', 'E1', 1, 0, 10, None, 'pending')
>>> cm.notify(db, 'E', 'E2', 2, 0, 10, None, 'pending')
>>> cm.notify(db, 'E', 'E3', 3, 0, 10, None, 'pending')
>>> pp(list(cm.get_jobs_to_run(now=100)))
[<ChannelJob E1>]
>>> pp(list(cm.get_jobs_to_run(now=101)))
[]
>>> cm.notify(db, 'E', 'E1', 1, 0, 10, 105, 'pending')
>>> cm.get_wakeup_time() # wakeup at eta
105
>>> pp(list(cm.get_jobs_to_run(now=102))) # but there is capacity
[<ChannelJob E2>]
>>> pp(list(cm.get_jobs_to_run(now=106))) # no capacity anymore
[]
>>> cm.get_wakeup_time() # no timed wakeup because no capacity
0
>>> cm.notify(db, 'E', 'E2', 1, 0, 10, None, 'done')
>>> cm.get_wakeup_time()
105
>>> pp(list(cm.get_jobs_to_run(now=107))) # no capacity anymore
[<ChannelJob E1>]
>>> cm.get_wakeup_time()
0
Test wakeup time behaviour in a sequential queue.
>>> cm = ChannelManager()
>>> cm.simple_configure('root:4,S:1:sequential')
>>> cm.notify(db, 'S', 'S1', 1, 0, 10, None, 'pending')
>>> cm.notify(db, 'S', 'S2', 2, 0, 10, None, 'pending')
>>> cm.notify(db, 'S', 'S3', 3, 0, 10, None, 'pending')
>>> pp(list(cm.get_jobs_to_run(now=100)))
[<ChannelJob S1>]
>>> cm.notify(db, 'S', 'S1', 1, 0, 10, None, 'failed')
>>> pp(list(cm.get_jobs_to_run(now=101)))
[]
>>> cm.notify(db, 'S', 'S2', 2, 0, 10, 105, 'pending')
>>> pp(list(cm.get_jobs_to_run(now=102)))
[]
    No wakeup time despite the eta, because the sequential queue
is waiting for a failed job.
>>> cm.get_wakeup_time()
0
>>> cm.notify(db, 'S', 'S1', 1, 0, 10, None, 'pending')
>>> cm.get_wakeup_time()
105
>>> pp(list(cm.get_jobs_to_run(now=102)))
[<ChannelJob S1>]
>>> pp(list(cm.get_jobs_to_run(now=103)))
[]
>>> cm.notify(db, 'S', 'S1', 1, 0, 10, None, 'done')
At this stage, we have S2 with an eta of 105 and since the
queue is sequential, we wait for it.
>>> pp(list(cm.get_jobs_to_run(now=103)))
[]
>>> pp(list(cm.get_jobs_to_run(now=105)))
[<ChannelJob S2>]
>>> cm.notify(db, 'S', 'S2', 2, 0, 10, 105, 'done')
>>> pp(list(cm.get_jobs_to_run(now=105)))
[<ChannelJob S3>]
>>> cm.notify(db, 'S', 'S3', 3, 0, 10, None, 'done')
>>> pp(list(cm.get_jobs_to_run(now=105)))
[]
"""
def __init__(self):
self._jobs_by_uuid = WeakValueDictionary()
self._root_channel = Channel(name="root", parent=None, capacity=1)
self._channels_by_name = WeakValueDictionary(root=self._root_channel)
@classmethod
def parse_simple_config(cls, config_string):
"""Parse a simple channels configuration string.
The general form is as follow:
channel(.subchannel)*(:capacity(:key(=value)?)*)? [, ...]
If capacity is absent, it defaults to 1.
If a key is present without value, it gets True as value.
When declaring subchannels, the root channel may be omitted
(ie sub:4 is the same as root.sub:4).
Returns a list of channel configuration dictionaries.
>>> from pprint import pprint as pp
>>> pp(ChannelManager.parse_simple_config('root:4'))
[{'capacity': 4, 'name': 'root'}]
>>> pp(ChannelManager.parse_simple_config('root:4,root.sub:2'))
[{'capacity': 4, 'name': 'root'}, {'capacity': 2, 'name': 'root.sub'}]
>>> pp(ChannelManager.parse_simple_config('root:4,root.sub:2:'
... 'sequential:k=v'))
[{'capacity': 4, 'name': 'root'},
{'capacity': 2, 'k': 'v', 'name': 'root.sub', 'sequential': True}]
>>> pp(ChannelManager.parse_simple_config('root'))
[{'capacity': 1, 'name': 'root'}]
>>> pp(ChannelManager.parse_simple_config('sub:2'))
[{'capacity': 2, 'name': 'sub'}]
It ignores whitespace around values and drops empty entries, such as
those generated by trailing commas or commented lines in the Odoo
config file.
>>> pp(ChannelManager.parse_simple_config('''
... root : 4,
... ,
... foo bar:1: k=va lue,
... '''))
[{'capacity': 4, 'name': 'root'},
{'capacity': 1, 'k': 'va lue', 'name': 'foo bar'}]
It's also possible to replace commas with line breaks, which is more
readable if the channel configuration comes from the odoo config file.
>>> pp(ChannelManager.parse_simple_config('''
... root : 4
... foo bar:1: k=va lue
... baz
... '''))
[{'capacity': 4, 'name': 'root'},
{'capacity': 1, 'k': 'va lue', 'name': 'foo bar'},
{'capacity': 1, 'name': 'baz'}]
"""
res = []
config_string = config_string.replace("\n", ",")
for channel_config_string in split_strip(config_string, ","):
if not channel_config_string:
# ignore empty entries (commented lines, trailing commas)
continue
config = {}
config_items = split_strip(channel_config_string, ":")
name = config_items[0]
if not name:
raise ValueError(
"Invalid channel config %s: missing channel name" % config_string
)
config["name"] = name
if len(config_items) > 1:
capacity = config_items[1]
try:
config["capacity"] = int(capacity)
except Exception as ex:
raise ValueError(
"Invalid channel config %s: "
"invalid capacity %s" % (config_string, capacity)
) from ex
for config_item in config_items[2:]:
kv = split_strip(config_item, "=")
if len(kv) == 1:
k, v = kv[0], True
elif len(kv) == 2:
k, v = kv
else:
raise ValueError(
"Invalid channel config %s: "
"incorrect config item %s" % (config_string, config_item)
)
if k in config:
raise ValueError(
"Invalid channel config %s: "
"duplicate key %s" % (config_string, k)
)
config[k] = v
else:
config["capacity"] = 1
res.append(config)
return res
def simple_configure(self, config_string):
"""Configure the channel manager from a simple configuration string
>>> cm = ChannelManager()
>>> c = cm.get_channel_by_name('root')
>>> c.capacity
1
>>> cm.simple_configure('root:4,autosub.sub:2,seq:1:sequential')
>>> cm.get_channel_by_name('root').capacity
4
>>> cm.get_channel_by_name('root').sequential
False
>>> cm.get_channel_by_name('root.autosub').capacity
>>> cm.get_channel_by_name('root.autosub.sub').capacity
2
>>> cm.get_channel_by_name('root.autosub.sub').sequential
False
>>> cm.get_channel_by_name('autosub.sub').capacity
2
>>> cm.get_channel_by_name('seq').capacity
1
>>> cm.get_channel_by_name('seq').sequential
True
"""
for config in ChannelManager.parse_simple_config(config_string):
self.get_channel_from_config(config)
def get_channel_from_config(self, config):
"""Return a Channel object from a parsed configuration.
If the channel does not exist it is created.
The configuration is applied on the channel before returning it.
If some of the parent channels are missing when creating a subchannel,
the parent channels are auto created with an infinite capacity
(except for the root channel, which defaults to a capacity of 1
when not configured explicitly).
"""
channel = self.get_channel_by_name(config["name"], autocreate=True)
channel.configure(config)
_logger.info("Configured channel: %s", channel)
return channel
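# Illustrative sketch, not part of the original module: how parsed entries
# from parse_simple_config() are fed to get_channel_from_config() (this is
# essentially what simple_configure() above does); values are hypothetical.
#
#     cm = ChannelManager()
#     for conf in ChannelManager.parse_simple_config("root:4,root.sub:2"):
#         cm.get_channel_from_config(conf)
#     cm.get_channel_by_name("root.sub").capacity  # -> 2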
def get_channel_by_name(
self, channel_name, autocreate=False, parent_fallback=False
):
"""Return a Channel object by its name.
If it does not exist and autocreate is True, it is created
with a default configuration and inserted in the Channels structure.
If autocreate is False and the channel does not exist, an exception
is raised.
>>> cm = ChannelManager()
>>> c = cm.get_channel_by_name('root', autocreate=False)
>>> c.name
'root'
>>> c.fullname
'root'
>>> c = cm.get_channel_by_name('root.sub', autocreate=True)
>>> c.name
'sub'
>>> c.fullname
'root.sub'
>>> c = cm.get_channel_by_name('sub', autocreate=True)
>>> c.name
'sub'
>>> c.fullname
'root.sub'
>>> c = cm.get_channel_by_name('autosub.sub', autocreate=True)
>>> c.name
'sub'
>>> c.fullname
'root.autosub.sub'
>>> c = cm.get_channel_by_name(None)
>>> c.fullname
'root'
>>> c = cm.get_channel_by_name('root.sub')
>>> c.fullname
'root.sub'
>>> c = cm.get_channel_by_name('sub')
>>> c.fullname
'root.sub'
>>> c = cm.get_channel_by_name('root.sub.not.configured', parent_fallback=True)
>>> c.fullname
'root.sub.sub.not.configured'
"""
if not channel_name or channel_name == self._root_channel.name:
return self._root_channel
if not channel_name.startswith(self._root_channel.name + "."):
channel_name = self._root_channel.name + "." + channel_name
if channel_name in self._channels_by_name:
return self._channels_by_name[channel_name]
if not autocreate and not parent_fallback:
raise ChannelNotFound("Channel %s not found" % channel_name)
parent = self._root_channel
if parent_fallback:
# Look for the first direct parent with a configuration.
# E.g. `root.edi.foo.baz` will fall back on `root.edi.foo`,
# then `root.edi`, then `root`, in that order.
parent_name = channel_name
while True:
parent_name = parent_name.rsplit(".", 1)[:-1][0]
if parent_name == self._root_channel.name:
break
if parent_name in self._channels_by_name:
parent = self._channels_by_name[parent_name]
_logger.debug(
"%s has no specific configuration: using %s",
channel_name,
parent_name,
)
break
for subchannel_name in channel_name.split(".")[1:]:
subchannel = parent.get_subchannel_by_name(subchannel_name)
if not subchannel:
subchannel = Channel(subchannel_name, parent, capacity=None)
self._channels_by_name[subchannel.fullname] = subchannel
parent = subchannel
return parent
def notify(
self, db_name, channel_name, uuid, seq, date_created, priority, eta, state
):
channel = self.get_channel_by_name(channel_name, parent_fallback=True)
job = self._jobs_by_uuid.get(uuid)
if job:
# db_name is invariant
assert job.db_name == db_name
# date_created is invariant
assert job.date_created == date_created
# if one of the job properties that influence
# scheduling order has changed, we remove the job
# from the queues and create a new job object
if (
seq != job.seq
or priority != job.priority
or eta != job.eta
or channel != job.channel
):
_logger.debug("job %s properties changed, rescheduling it", uuid)
self.remove_job(uuid)
job = None
if not job:
job = ChannelJob(db_name, channel, uuid, seq, date_created, priority, eta)
self._jobs_by_uuid[uuid] = job
# state transitions
if not state or state in (DONE, CANCELLED):
job.channel.set_done(job)
elif state == PENDING:
job.channel.set_pending(job)
elif state in (ENQUEUED, STARTED):
job.channel.set_running(job)
elif state == FAILED:
job.channel.set_failed(job)
elif state == WAIT_DEPENDENCIES:
# wait until all parent jobs are done
pass
else:
_logger.error("unexpected state %s for job %s", state, job)
def remove_job(self, uuid):
job = self._jobs_by_uuid.get(uuid)
if job:
job.channel.remove(job)
del self._jobs_by_uuid[job.uuid]
def remove_db(self, db_name):
for job in list(self._jobs_by_uuid.values()):
if job.db_name == db_name:
job.channel.remove(job)
del self._jobs_by_uuid[job.uuid]
def get_jobs_to_run(self, now):
return self._root_channel.get_jobs_to_run(now)
def get_wakeup_time(self):
return self._root_channel.get_wakeup_time()
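# Sketch of how a runner loop could consume this API (an assumption based
# only on the doctests above; the actual runner lives elsewhere):
#
#     jobs = list(cm.get_jobs_to_run(now=time.time()))
#     # ... enqueue those jobs ...
#     wakeup = cm.get_wakeup_time()  # 0 means no timed wakeup is needed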
| 33.969416 | 36,653 |
209 |
py
|
PYTHON
|
15.0
|
import odoo
from .runner import QueueJobRunner
def main():
odoo.tools.config.parse_config()
runner = QueueJobRunner.from_environ_or_config()
runner.run()
if __name__ == "__main__":
main()
| 16.076923 | 209 |
429 |
py
|
PYTHON
|
15.0
|
# Copyright 2013-2020 Camptocamp SA
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
from odoo import models
class SetJobsToDone(models.TransientModel):
_inherit = "queue.requeue.job"
_name = "queue.jobs.to.done"
_description = "Set all selected jobs to done"
def set_done(self):
jobs = self.job_ids
jobs.button_done()
return {"type": "ir.actions.act_window_close"}
| 28.6 | 429 |
765 |
py
|
PYTHON
|
15.0
|
# Copyright 2013-2020 Camptocamp SA
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
from odoo import fields, models
class QueueRequeueJob(models.TransientModel):
_name = "queue.requeue.job"
_description = "Wizard to requeue a selection of jobs"
def _default_job_ids(self):
res = False
context = self.env.context
if context.get("active_model") == "queue.job" and context.get("active_ids"):
res = context["active_ids"]
return res
job_ids = fields.Many2many(
comodel_name="queue.job", string="Jobs", default=lambda r: r._default_job_ids()
)
def requeue(self):
jobs = self.job_ids
jobs.requeue()
return {"type": "ir.actions.act_window_close"}
| 30.6 | 765 |
531 |
py
|
PYTHON
|
15.0
|
# Copyright 2013-2020 Camptocamp SA
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
from odoo import models
class SetJobsToCancelled(models.TransientModel):
_inherit = "queue.requeue.job"
_name = "queue.jobs.to.cancelled"
_description = "Cancel all selected jobs"
def set_cancelled(self):
jobs = self.job_ids.filtered(
lambda x: x.state in ("pending", "failed", "enqueued")
)
jobs.button_cancelled()
return {"type": "ir.actions.act_window_close"}
| 31.235294 | 531 |
9,787 |
py
|
PYTHON
|
15.0
|
# Copyright (c) 2015-2016 ACSONE SA/NV (<http://acsone.eu>)
# Copyright 2013-2016 Camptocamp SA
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
import logging
import random
import time
import traceback
from io import StringIO
from psycopg2 import OperationalError, errorcodes
from werkzeug.exceptions import BadRequest, Forbidden
from odoo import SUPERUSER_ID, _, api, http, registry, tools
from odoo.service.model import PG_CONCURRENCY_ERRORS_TO_RETRY
from ..delay import chain, group
from ..exception import FailedJobError, NothingToDoJob, RetryableJobError
from ..job import ENQUEUED, Job
_logger = logging.getLogger(__name__)
PG_RETRY = 5 # seconds
DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE = 5
class RunJobController(http.Controller):
def _try_perform_job(self, env, job):
"""Try to perform the job."""
job.set_started()
job.store()
env.cr.commit()
_logger.debug("%s started", job)
job.perform()
job.set_done()
job.store()
env["base"].flush()
env.cr.commit()
_logger.debug("%s done", job)
def _enqueue_dependent_jobs(self, env, job):
tries = 0
while True:
try:
job.enqueue_waiting()
except OperationalError as err:
# Automatically retry the typical transaction serialization
# errors
if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
raise
if tries >= DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE:
_logger.info(
"%s, maximum number of tries reached to update dependencies",
errorcodes.lookup(err.pgcode),
)
raise
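# exponential backoff with random jitter: wait up to 2**tries seconds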
wait_time = random.uniform(0.0, 2**tries)
tries += 1
_logger.info(
"%s, retry %d/%d in %.04f sec...",
errorcodes.lookup(err.pgcode),
tries,
DEPENDS_MAX_TRIES_ON_CONCURRENCY_FAILURE,
wait_time,
)
time.sleep(wait_time)
else:
break
@http.route("/queue_job/runjob", type="http", auth="none", save_session=False)
def runjob(self, db, job_uuid, **kw):
http.request.session.db = db
env = http.request.env(user=SUPERUSER_ID)
def retry_postpone(job, message, seconds=None):
job.env.clear()
with registry(job.env.cr.dbname).cursor() as new_cr:
job.env = api.Environment(new_cr, SUPERUSER_ID, {})
job.postpone(result=message, seconds=seconds)
job.set_pending(reset_retry=False)
job.store()
# ensure the job to run is in the correct state and lock the record
env.cr.execute(
"SELECT state FROM queue_job WHERE uuid=%s AND state=%s FOR UPDATE",
(job_uuid, ENQUEUED),
)
if not env.cr.fetchone():
_logger.warning(
"was requested to run job %s, but it does not exist, "
"or is not in state %s",
job_uuid,
ENQUEUED,
)
return ""
job = Job.load(env, job_uuid)
assert job and job.state == ENQUEUED
try:
try:
self._try_perform_job(env, job)
except OperationalError as err:
# Automatically retry the typical transaction serialization
# errors
if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
raise
_logger.debug("%s OperationalError, postponed", job)
raise RetryableJobError(
tools.ustr(err.pgerror, errors="replace"), seconds=PG_RETRY
) from err
except NothingToDoJob as err:
if str(err):
msg = str(err)
else:
msg = _("Job interrupted and set to Done: nothing to do.")
job.set_done(msg)
job.store()
env.cr.commit()
except RetryableJobError as err:
# delay the job later, requeue
retry_postpone(job, str(err), seconds=err.seconds)
_logger.debug("%s postponed", job)
# Do not let the exception propagate: we don't want an exception
# traceback in the logs here; we will have the traceback when all
# retries are exhausted
env.cr.rollback()
return ""
except (FailedJobError, Exception) as orig_exception:
buff = StringIO()
traceback.print_exc(file=buff)
traceback_txt = buff.getvalue()
_logger.error(traceback_txt)
job.env.clear()
with registry(job.env.cr.dbname).cursor() as new_cr:
job.env = job.env(cr=new_cr)
vals = self._get_failure_values(job, traceback_txt, orig_exception)
job.set_failed(**vals)
job.store()
buff.close()
raise
_logger.debug("%s enqueue depends started", job)
self._enqueue_dependent_jobs(env, job)
_logger.debug("%s enqueue depends done", job)
return ""
def _get_failure_values(self, job, traceback_txt, orig_exception):
"""Collect relevant data from exception."""
exception_name = orig_exception.__class__.__name__
if hasattr(orig_exception, "__module__"):
exception_name = orig_exception.__module__ + "." + exception_name
exc_message = getattr(orig_exception, "name", str(orig_exception))
return {
"exc_info": traceback_txt,
"exc_name": exception_name,
"exc_message": exc_message,
}
# flake8: noqa: C901
@http.route("/queue_job/create_test_job", type="http", auth="user")
def create_test_job(
self,
priority=None,
max_retries=None,
channel=None,
description="Test job",
size=1,
failure_rate=0,
):
if not http.request.env.user.has_group("base.group_erp_manager"):
raise Forbidden(_("Access Denied"))
if failure_rate is not None:
try:
failure_rate = float(failure_rate)
except (ValueError, TypeError):
failure_rate = 0
if not (0 <= failure_rate <= 1):
raise BadRequest("failure_rate must be between 0 and 1")
if size is not None:
try:
size = int(size)
except (ValueError, TypeError):
size = 1
if priority is not None:
try:
priority = int(priority)
except ValueError:
priority = None
if max_retries is not None:
try:
max_retries = int(max_retries)
except ValueError:
max_retries = None
if size == 1:
return self._create_single_test_job(
priority=priority,
max_retries=max_retries,
channel=channel,
description=description,
failure_rate=failure_rate,
)
if size > 1:
return self._create_graph_test_jobs(
size,
priority=priority,
max_retries=max_retries,
channel=channel,
description=description,
failure_rate=failure_rate,
)
return ""
def _create_single_test_job(
self,
priority=None,
max_retries=None,
channel=None,
description="Test job",
size=1,
failure_rate=0,
):
delayed = (
http.request.env["queue.job"]
.with_delay(
priority=priority,
max_retries=max_retries,
channel=channel,
description=description,
)
._test_job(failure_rate=failure_rate)
)
return "job uuid: %s" % (delayed.db_record().uuid,)
TEST_GRAPH_MAX_PER_GROUP = 5
def _create_graph_test_jobs(
self,
size,
priority=None,
max_retries=None,
channel=None,
description="Test job",
failure_rate=0,
):
model = http.request.env["queue.job"]
current_count = 0
possible_grouping_methods = (chain, group)
tails = [] # we can connect new graph chains/groups to tails
root_delayable = None
while current_count < size:
jobs_count = min(
size - current_count, random.randint(1, self.TEST_GRAPH_MAX_PER_GROUP)
)
jobs = []
for __ in range(jobs_count):
current_count += 1
jobs.append(
model.delayable(
priority=priority,
max_retries=max_retries,
channel=channel,
description="%s #%d" % (description, current_count),
)._test_job(failure_rate=failure_rate)
)
grouping = random.choice(possible_grouping_methods)
delayable = grouping(*jobs)
if not root_delayable:
root_delayable = delayable
else:
tail_delayable = random.choice(tails)
tail_delayable.on_done(delayable)
tails.append(delayable)
root_delayable.delay()
return "graph uuid: %s" % (
list(root_delayable._head())[0]._generated_job.graph_uuid,
)
| 32.952862 | 9,787 |
725 |
py
|
PYTHON
|
15.0
|
import setuptools
with open('VERSION.txt', 'r') as f:
version = f.read().strip()
setuptools.setup(
name="odoo-addons-oca-queue",
description="Meta package for oca-queue Odoo addons",
version=version,
install_requires=[
'odoo-addon-base_export_async>=15.0dev,<15.1dev',
'odoo-addon-queue_job>=15.0dev,<15.1dev',
'odoo-addon-queue_job_cron>=15.0dev,<15.1dev',
'odoo-addon-queue_job_cron_jobrunner>=15.0dev,<15.1dev',
'odoo-addon-queue_job_subscribe>=15.0dev,<15.1dev',
'odoo-addon-test_queue_job>=15.0dev,<15.1dev',
],
classifiers=[
'Programming Language :: Python',
'Framework :: Odoo',
'Framework :: Odoo :: 15.0',
]
)
| 31.521739 | 725 |
100 |
py
|
PYTHON
|
15.0
|
import setuptools
setuptools.setup(
setup_requires=['setuptools-odoo'],
odoo_addon=True,
)
| 16.666667 | 100 |
100 |
py
|
PYTHON
|
15.0
|
import setuptools
setuptools.setup(
setup_requires=['setuptools-odoo'],
odoo_addon=True,
)
| 16.666667 | 100 |
100 |
py
|
PYTHON
|
15.0
|
import setuptools
setuptools.setup(
setup_requires=['setuptools-odoo'],
odoo_addon=True,
)
| 16.666667 | 100 |
100 |
py
|
PYTHON
|
15.0
|
import setuptools
setuptools.setup(
setup_requires=['setuptools-odoo'],
odoo_addon=True,
)
| 16.666667 | 100 |
100 |
py
|
PYTHON
|
15.0
|
import setuptools
setuptools.setup(
setup_requires=['setuptools-odoo'],
odoo_addon=True,
)
| 16.666667 | 100 |
100 |
py
|
PYTHON
|
15.0
|
import setuptools
setuptools.setup(
setup_requires=['setuptools-odoo'],
odoo_addon=True,
)
| 16.666667 | 100 |
475 |
py
|
PYTHON
|
15.0
|
{
"name": "Queue Job Cron Jobrunner",
"summary": "Run jobs without a dedicated JobRunner",
"version": "15.0.2.0.0",
"development_status": "Alpha",
"author": "Camptocamp SA, Odoo Community Association (OCA)",
"maintainers": ["ivantodorovich"],
"website": "https://github.com/OCA/queue",
"license": "AGPL-3",
"category": "Others",
"depends": ["queue_job"],
"data": [
"data/ir_cron.xml",
"views/ir_cron.xml",
],
}
| 29.6875 | 475 |
3,270 |
py
|
PYTHON
|
15.0
|
# Copyright 2022 Camptocamp SA (https://www.camptocamp.com).
# @author Iván Todorovich <[email protected]>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from datetime import timedelta
from freezegun import freeze_time
from odoo import fields
from odoo.tests.common import TransactionCase
from odoo.tools import mute_logger
class TestQueueJob(TransactionCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.env = cls.env(context=dict(cls.env.context, tracking_disable=True))
cls.cron = cls.env.ref("queue_job_cron_jobrunner.queue_job_cron")
# Cleanup triggers just in case
cls.env["ir.cron.trigger"].search([]).unlink()
def assertTriggerAt(self, at, message=None):
"""Ensures a cron trigger is created at the given time"""
return self.assertTrue(
self.env["ir.cron.trigger"].search([("call_at", "=", at)]),
message,
)
@freeze_time("2022-02-22 22:22:22")
def test_queue_job_cron_trigger(self):
"""Test that ir.cron triggers are created for every queue.job"""
job = self.env["res.partner"].with_delay().create({"name": "test"})
job_record = job.db_record()
self.assertTriggerAt(fields.Datetime.now(), "Trigger should've been created")
job_record.eta = fields.Datetime.now() + timedelta(hours=1)
self.assertTriggerAt(job_record.eta, "A new trigger should've been created")
@mute_logger("odoo.addons.queue_job_cron_jobrunner.models.queue_job")
def test_queue_job_process(self):
"""Test that jobs are processed by the queue job cron"""
# Create some jobs
job1 = self.env["res.partner"].with_delay().create({"name": "test"})
job1_record = job1.db_record()
job2 = self.env["res.partner"].with_delay().create(False)
job2_record = job2.db_record()
job3 = self.env["res.partner"].with_delay(eta=3600).create({"name": "Test"})
job3_record = job3.db_record()
# Run the job processing cron
self.env["queue.job"]._job_runner(commit=False)
# Check that the jobs were processed
self.assertEqual(job1_record.state, "done", "Processed OK")
self.assertEqual(job2_record.state, "failed", "Has errors")
self.assertEqual(job3_record.state, "pending", "Still pending, because of eta")
@freeze_time("2022-02-22 22:22:22")
def test_queue_job_cron_trigger_enqueue_dependencies(self):
"""Test that ir.cron execution enqueue waiting dependencies"""
delayable = self.env["res.partner"].delayable().create({"name": "test"})
delayable2 = self.env["res.partner"].delayable().create({"name": "test2"})
delayable.on_done(delayable2)
delayable.delay()
job_record = delayable._generated_job.db_record()
job_record_depends = delayable2._generated_job.db_record()
self.env["queue.job"]._job_runner(commit=False)
self.assertEqual(job_record.state, "done", "Processed OK")
# if the state is "waiting_dependencies", it means the "enqueue_waiting()"
# step had not been performed when the parent job was done
self.assertEqual(job_record_depends.state, "done", "Processed OK")
| 46.042254 | 3,269 |
5,937 |
py
|
PYTHON
|
15.0
|
# Copyright 2022 Camptocamp SA (https://www.camptocamp.com).
# @author Iván Todorovich <[email protected]>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
import logging
import traceback
from io import StringIO
from psycopg2 import OperationalError
from odoo import _, api, models, tools
from odoo.service.model import PG_CONCURRENCY_ERRORS_TO_RETRY
from odoo.addons.queue_job.controllers.main import PG_RETRY
from odoo.addons.queue_job.exception import (
FailedJobError,
NothingToDoJob,
RetryableJobError,
)
from odoo.addons.queue_job.job import Job
_logger = logging.getLogger(__name__)
class QueueJob(models.Model):
_inherit = "queue.job"
@api.model
def _acquire_one_job(self):
"""Acquire the next job to be run.
:returns: queue.job record (locked for update)
"""
# TODO: This method should respect channel priority and capacity,
# rather than just fetching jobs by creation date.
self.flush()
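# FOR NO KEY UPDATE SKIP LOCKED: concurrent runner transactions each lock
# a different pending job instead of blocking on one another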
self.env.cr.execute(
"""
SELECT id
FROM queue_job
WHERE state = 'pending'
AND (eta IS NULL OR eta <= (now() AT TIME ZONE 'UTC'))
ORDER BY date_created DESC
LIMIT 1 FOR NO KEY UPDATE SKIP LOCKED
"""
)
row = self.env.cr.fetchone()
return self.browse(row and row[0])
def _process(self, commit=False):
"""Process the job"""
self.ensure_one()
job = Job._load_from_db_record(self)
# Set it as started
job.set_started()
job.store()
_logger.debug("%s started", job.uuid)
# TODO: Commit the state change so that the state can be read from the UI
# while the job is processing. However, doing this will release the
# lock on the db, so we need to find another way.
# if commit:
# self.flush()
# self.env.cr.commit()
# Actual processing
try:
try:
with self.env.cr.savepoint():
job.perform()
job.set_done()
job.store()
except OperationalError as err:
# Automatically retry the typical transaction serialization errors
if err.pgcode not in PG_CONCURRENCY_ERRORS_TO_RETRY:
raise
message = tools.ustr(err.pgerror, errors="replace")
job.postpone(result=message, seconds=PG_RETRY)
job.set_pending(reset_retry=False)
job.store()
_logger.debug("%s OperationalError, postponed", job)
except NothingToDoJob as err:
if str(err):
msg = str(err)
else:
msg = _("Job interrupted and set to Done: nothing to do.")
job.set_done(msg)
job.store()
except RetryableJobError as err:
# delay the job later, requeue
job.postpone(result=str(err), seconds=5)
job.set_pending(reset_retry=False)
job.store()
_logger.debug("%s postponed", job)
except (FailedJobError, Exception):
with StringIO() as buff:
traceback.print_exc(file=buff)
_logger.error(buff.getvalue())
job.set_failed(exc_info=buff.getvalue())
job.store()
if commit: # pragma: no cover
self.env["base"].flush()
self.env.cr.commit() # pylint: disable=invalid-commit
_logger.debug("%s enqueue depends started", job)
job.enqueue_waiting()
_logger.debug("%s enqueue depends done", job)
@api.model
def _job_runner(self, commit=True):
"""Short-lived job runner, triggered by async crons"""
job = self._acquire_one_job()
while job:
job._process(commit=commit)
job = self._acquire_one_job()
# TODO: If limit_time_real_cron is reached before all the jobs are done,
# the worker will be killed abruptly.
# Ideally, find a way to know if we're close to reaching this limit,
# stop processing, and trigger a new execution to continue.
#
# if job and limit_time_real_cron_reached_or_about_to_reach:
# self._cron_trigger()
# break
@api.model
def _cron_trigger(self, at=None):
"""Trigger the cron job runners
Odoo will prevent concurrent cron jobs from running.
So, to support parallel execution, we'd need to have (at least) the
same number of ir.cron records as cron workers.
All crons should be triggered at the same time.
"""
crons = self.env["ir.cron"].sudo().search([("queue_job_runner", "=", True)])
for cron in crons:
cron._trigger(at=at)
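# Illustrative note, restating the docstring above: to process jobs in
# parallel, additional ir.cron records with queue_job_runner=True would be
# needed, because Odoo never runs two instances of the same cron
# concurrently.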
def _ensure_cron_trigger(self):
"""Create cron triggers for these jobs"""
records = self.filtered(lambda r: r.state == "pending")
if not records:
return
# Trigger immediate runs
immediate = any(not rec.eta for rec in records)
if immediate:
self._cron_trigger()
# Trigger delayed eta runs
delayed_etas = {rec.eta for rec in records if rec.eta}
if delayed_etas:
self._cron_trigger(at=list(delayed_etas))
@api.model_create_multi
def create(self, vals_list):
# When jobs are created, also create the cron trigger
records = super().create(vals_list)
records._ensure_cron_trigger()
return records
def write(self, vals):
# When a job state or eta changes, make sure a cron trigger is created
res = super().write(vals)
if "state" in vals or "eta" in vals:
self._ensure_cron_trigger()
return res
| 35.333333 | 5,936 |
397 |
py
|
PYTHON
|
15.0
|
# Copyright 2022 Camptocamp SA (https://www.camptocamp.com).
# @author Iván Todorovich <[email protected]>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import fields, models
class IrCron(models.Model):
_inherit = "ir.cron"
queue_job_runner = fields.Boolean(
help="If checked, the cron is considered to be a queue.job runner.",
)
| 30.461538 | 396 |
721 |
py
|
PYTHON
|
15.0
|
# @author Stéphane Bidoul <[email protected]>
# @author Sébastien BEAU <[email protected]>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
{
"name": "Asynchronous Import",
"summary": "Import CSV files in the background",
"version": "14.0.1.0.1",
"author": "Akretion, ACSONE SA/NV, Odoo Community Association (OCA)",
"license": "AGPL-3",
"website": "https://github.com/OCA/queue",
"category": "Generic Modules",
"depends": ["base_import", "queue_job"],
"data": ["data/queue_job_function_data.xml", "views/base_import_async.xml"],
"qweb": ["static/src/xml/import.xml"],
"installable": False,
"development_status": "Production/Stable",
}
| 39.944444 | 719 |
541 |
py
|
PYTHON
|
15.0
|
# Copyright 2017 ACSONE SA/NV
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import _, models
class QueueJob(models.Model):
""" Job status and result """
_inherit = "queue.job"
def _related_action_attachment(self):
res_id = self.kwargs.get("att_id")
action = {
"name": _("Attachment"),
"type": "ir.actions.act_window",
"res_model": "ir.attachment",
"view_mode": "form",
"res_id": res_id,
}
return action
| 25.761905 | 541 |
6,749 |
py
|
PYTHON
|
15.0
|
# Copyright 2014 ACSONE SA/NV (http://acsone.eu).
# Copyright 2013 Akretion (http://www.akretion.com).
# @author Stéphane Bidoul <[email protected]>
# @author Sébastien BEAU <[email protected]>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
import base64
import csv
from io import BytesIO, StringIO, TextIOWrapper
from os.path import splitext
from odoo import _, api, models
from odoo.models import fix_import_export_id_paths
from odoo.addons.queue_job.exception import FailedJobError
# options defined in base_import/import.js
OPT_HAS_HEADER = "headers"
OPT_SEPARATOR = "separator"
OPT_QUOTING = "quoting"
OPT_ENCODING = "encoding"
# options defined in base_import_async/import.js
OPT_USE_QUEUE = "use_queue"
OPT_CHUNK_SIZE = "chunk_size"
# option not available in UI, but usable from scripts
OPT_PRIORITY = "priority"
INIT_PRIORITY = 100
DEFAULT_CHUNK_SIZE = 100
class BaseImportImport(models.TransientModel):
_inherit = "base_import.import"
def do(self, fields, columns, options, dryrun=False):
if dryrun or not options.get(OPT_USE_QUEUE):
# normal import
return super().do(fields, columns, options, dryrun=dryrun)
# asynchronous import
try:
data, import_fields = self._convert_import_data(fields, options)
# Parse date and float field
data = self._parse_import_data(data, import_fields, options)
except ValueError as e:
return {"messages": [{"type": "error", "message": str(e), "record": False}]}
# get the translated model name to build
# a meaningful job description
search_result = self.env["ir.model"].name_search(self.res_model, operator="=")
if search_result:
translated_model_name = search_result[0][1]
else:
translated_model_name = self._description
description = _("Import %s from file %s") % (
translated_model_name,
self.file_name,
)
attachment = self._create_csv_attachment(
import_fields, data, options, self.file_name
)
delayed_job = self.with_delay(description=description)._split_file(
model_name=self.res_model,
translated_model_name=translated_model_name,
attachment=attachment,
options=options,
file_name=self.file_name,
)
self._link_attachment_to_job(delayed_job, attachment)
return []
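# Illustrative sketch (hypothetical importer/fields/columns objects):
# enabling the asynchronous path programmatically by passing the options
# defined at the top of this file.
#
#     importer.do(fields, columns, {OPT_HAS_HEADER: True, OPT_SEPARATOR: ",",
#                                   OPT_QUOTING: '"', OPT_USE_QUEUE: True})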
def _link_attachment_to_job(self, delayed_job, attachment):
queue_job = self.env["queue.job"].search(
[("uuid", "=", delayed_job.uuid)], limit=1
)
attachment.write({"res_model": "queue.job", "res_id": queue_job.id})
@api.returns("ir.attachment")
def _create_csv_attachment(self, fields, data, options, file_name):
# write csv
f = StringIO()
writer = csv.writer(
f,
delimiter=str(options.get(OPT_SEPARATOR)) or ",",
quotechar=str(options.get(OPT_QUOTING)),
)
encoding = options.get(OPT_ENCODING) or "utf-8"
writer.writerow(fields)
for row in data:
writer.writerow(row)
# create attachment
datas = base64.encodebytes(f.getvalue().encode(encoding))
attachment = self.env["ir.attachment"].create(
{"name": file_name, "datas": datas}
)
return attachment
def _read_csv_attachment(self, attachment, options):
decoded_datas = base64.decodebytes(attachment.datas)
encoding = options.get(OPT_ENCODING) or "utf-8"
f = TextIOWrapper(BytesIO(decoded_datas), encoding=encoding)
reader = csv.reader(
f,
delimiter=str(options.get(OPT_SEPARATOR)) or ",",
quotechar=str(options.get(OPT_QUOTING)),
)
fields = next(reader)
data = [row for row in reader]
return fields, data
@staticmethod
def _extract_chunks(model_obj, fields, data, chunk_size):
"""Split the data on record boundaries, in chunks of minimum chunk_size"""
fields = list(map(fix_import_export_id_paths, fields))
row_from = 0
for rows in model_obj._extract_records(fields, data):
rows = rows[1]["rows"]
if rows["to"] - row_from + 1 >= chunk_size:
yield row_from, rows["to"]
row_from = rows["to"] + 1
if row_from < len(data):
yield row_from, len(data) - 1
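# Worked example (hypothetical data) for _extract_chunks: with chunk_size=2
# and three records spanning rows 0-2, 3-5 and 6-8, the generator yields
# (0, 2), (3, 5) and (6, 8), i.e. chunks always end on a record boundary.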
def _split_file(
self,
model_name,
translated_model_name,
attachment,
options,
file_name="file.csv",
):
""" Split a CSV attachment in smaller import jobs """
model_obj = self.env[model_name]
fields, data = self._read_csv_attachment(attachment, options)
padding = len(str(len(data)))
priority = options.get(OPT_PRIORITY, INIT_PRIORITY)
if options.get(OPT_HAS_HEADER):
header_offset = 1
else:
header_offset = 0
chunk_size = options.get(OPT_CHUNK_SIZE) or DEFAULT_CHUNK_SIZE
for row_from, row_to in self._extract_chunks(
model_obj, fields, data, chunk_size
):
chunk = str(priority - INIT_PRIORITY).zfill(padding)
description = _("Import %s from file %s - #%s - lines %s to %s")
description = description % (
translated_model_name,
file_name,
chunk,
row_from + 1 + header_offset,
row_to + 1 + header_offset,
)
# create a CSV attachment and enqueue the job
root, ext = splitext(file_name)
attachment = self._create_csv_attachment(
fields,
data[row_from : row_to + 1],
options,
file_name=root + "-" + chunk + ext,
)
delayed_job = self.with_delay(
description=description, priority=priority
)._import_one_chunk(
model_name=model_name, attachment=attachment, options=options
)
self._link_attachment_to_job(delayed_job, attachment)
priority += 1
def _import_one_chunk(self, model_name, attachment, options):
model_obj = self.env[model_name]
fields, data = self._read_csv_attachment(attachment, options)
result = model_obj.load(fields, data)
error_message = [
message["message"]
for message in result["messages"]
if message["type"] == "error"
]
if error_message:
raise FailedJobError("\n".join(error_message))
return result
| 37.071429 | 6,747 |
666 |
py
|
PYTHON
|
15.0
|
# Copyright 2014 ACSONE SA/NV (http://acsone.eu)
# @author Stéphane Bidoul <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Test suite for base_import_async",
"version": "14.0.1.0.1",
"author": "ACSONE SA/NV, Odoo Community Association (OCA)",
"license": "AGPL-3",
"website": "https://github.com/OCA/queue",
"category": "Generic Modules",
"summary": """Test suite for base_import_async.
Normally you don't need to install this.
""",
"depends": ["base_import_async", "account"],
"data": [],
"installable": False,
"development_status": "Production/Stable",
}
| 33.25 | 665 |
8,070 |
py
|
PYTHON
|
15.0
|
# Copyright 2014 ACSONE SA/NV (http://acsone.eu)
# @author Stéphane Bidoul <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import os
import odoo.tests.common as common
from odoo.addons.base_import_async.models.base_import_import import (
OPT_CHUNK_SIZE,
OPT_HAS_HEADER,
OPT_QUOTING,
OPT_SEPARATOR,
OPT_USE_QUEUE,
)
from odoo.addons.queue_job.job import Job
class TestBaseImportAsync(common.SavepointCase):
FIELDS = [
"date",
"journal_id/id",
"name",
"ref",
"line_ids/account_id/id",
"line_ids/name",
"line_ids/debit",
"line_ids/credit",
"line_ids/partner_id/id",
]
OPTIONS = {
OPT_SEPARATOR: ",",
OPT_QUOTING: '"',
OPT_HAS_HEADER: True,
"date_format": "%Y-%m-%d",
}
@classmethod
def setUpClass(cls):
super().setUpClass()
# add xmlids that will be used in the test CSV file
cls.env["ir.model.data"]._update_xmlids(
[
{
"xml_id": "test_base_import_async.testjournal_xmlid",
"record": cls.env["account.journal"].search(
[("code", "=", "CABA")]
),
},
{
"xml_id": "test_base_import_async.a_recv_xmlid",
"record": cls.env["account.account"].search(
[("code", "=", "121000")]
),
},
{
"xml_id": "test_base_import_async.a_sale_xmlid",
"record": cls.env["account.account"].search(
[("code", "=", "400000")]
),
},
]
)
cls.import_obj = cls.env["base_import.import"]
cls.move_obj = cls.env["account.move"]
cls.job_obj = cls.env["queue.job"]
def _read_test_file(self, file_name):
file_name = os.path.join(os.path.dirname(__file__), file_name)
with open(file_name) as opened:
return opened.read()
def _do_import(self, file_name, use_queue, chunk_size=None):
data = self._read_test_file(file_name)
importer = self.import_obj.create(
{"res_model": "account.move", "file": data, "file_name": file_name}
)
options = dict(self.OPTIONS)
options[OPT_USE_QUEUE] = use_queue
options[OPT_CHUNK_SIZE] = chunk_size
return importer.do(self.FIELDS, self.FIELDS, options)
def _check_import_result(self):
move_count = self.move_obj.search_count(
[("name", "in", ("TEST-1", "TEST-2", "TEST-3"))]
)
self.assertEqual(move_count, 3)
def test_normal_import(self):
""" Test the standard import still works. """
res = self._do_import("account.move.csv", use_queue=False)
self.assertFalse(res["messages"], repr(res))
self._check_import_result()
def test_async_import(self):
""" Basic asynchronous import test with default large chunk size. """
res = self._do_import("account.move.csv", use_queue=True)
self.assertFalse(res, repr(res))
# no moves should be created yet
move_count = self.move_obj.search(
[("name", "in", ("TEST-1", "TEST-2", "TEST-3"))]
)
self.assertEqual(len(move_count), 0)
# but we must have one job to split the file
split_job = self.job_obj.search([])
self.assertEqual(len(split_job), 1)
# job names are important
self.assertEqual(
split_job.name, "Import Journal Entry from file account.move.csv"
)
# perform job
Job.load(self.env, split_job.uuid).perform()
# check one job has been generated to load the file (one chunk)
load_job = self.job_obj.search([("id", "!=", split_job.id)])
self.assertEqual(len(load_job), 1)
self.assertEqual(
load_job.name,
"Import Journal Entry from file account.move.csv - " "#0 - lines 2 to 10",
)
# perform job
Job.load(self.env, load_job.uuid).perform()
self._check_import_result()
def test_async_import_small_misaligned_chunks(self):
""" Chunk size larger than record. """
res = self._do_import("account.move.csv", use_queue=True, chunk_size=4)
self.assertFalse(res, repr(res))
# but we must have one job to split the file
split_job = self.job_obj.search([])
self.assertEqual(len(split_job), 1)
# perform job
Job.load(self.env, split_job.uuid).perform()
# check two jobs have been generated to load the file (two chunks)
load_jobs = self.job_obj.search([("id", "!=", split_job.id)], order="name")
self.assertEqual(len(load_jobs), 2)
self.assertEqual(
load_jobs[0].name,
"Import Journal Entry from file account.move.csv - " "#0 - lines 2 to 7",
)
self.assertEqual(
load_jobs[1].name,
"Import Journal Entry from file account.move.csv - " "#1 - lines 8 to 10",
)
# perform job
Job.load(self.env, load_jobs[0].uuid).perform()
Job.load(self.env, load_jobs[1].uuid).perform()
self._check_import_result()
def test_async_import_smaller_misaligned_chunks(self):
""" Chunk size smaller than record. """
res = self._do_import("account.move.csv", use_queue=True, chunk_size=2)
self.assertFalse(res, repr(res))
# but we must have one job to split the file
split_job = self.job_obj.search([])
self.assertEqual(len(split_job), 1)
# perform job
Job.load(self.env, split_job.uuid).perform()
# check three jobs have been generated to load the file (three chunks)
load_jobs = self.job_obj.search([("id", "!=", split_job.id)], order="name")
self.assertEqual(len(load_jobs), 3)
self.assertEqual(
load_jobs[0].name,
"Import Journal Entry from file account.move.csv - " "#0 - lines 2 to 4",
)
self.assertEqual(
load_jobs[1].name,
"Import Journal Entry from file account.move.csv - " "#1 - lines 5 to 7",
)
self.assertEqual(
load_jobs[2].name,
"Import Journal Entry from file account.move.csv - " "#2 - lines 8 to 10",
)
# perform job
Job.load(self.env, load_jobs[0].uuid).perform()
Job.load(self.env, load_jobs[1].uuid).perform()
Job.load(self.env, load_jobs[2].uuid).perform()
self._check_import_result()
def test_async_import_smaller_aligned_chunks(self):
"""Chunks aligned on record boundaries. Last chunk ends exactly at file end."""
res = self._do_import("account.move.csv", use_queue=True, chunk_size=3)
self.assertFalse(res, repr(res))
# but we must have one job to split the file
split_job = self.job_obj.search([])
self.assertEqual(len(split_job), 1)
# perform job
Job.load(self.env, split_job.uuid).perform()
# check three jobs have been generated to load the file (three chunks)
load_jobs = self.job_obj.search([("id", "!=", split_job.id)], order="name")
self.assertEqual(len(load_jobs), 3)
self.assertEqual(
load_jobs[0].name,
"Import Journal Entry from file account.move.csv - " "#0 - lines 2 to 4",
)
self.assertEqual(
load_jobs[1].name,
"Import Journal Entry from file account.move.csv - " "#1 - lines 5 to 7",
)
self.assertEqual(
load_jobs[2].name,
"Import Journal Entry from file account.move.csv - " "#2 - lines 8 to 10",
)
# perform job
Job.load(self.env, load_jobs[0].uuid).perform()
Job.load(self.env, load_jobs[1].uuid).perform()
Job.load(self.env, load_jobs[2].uuid).perform()
self._check_import_result()
| 38.793269 | 8,069 |