diff --git a/queue_job_batch_size/README.rst b/queue_job_batch_size/README.rst new file mode 100644 index 0000000000..f6cbdad01c --- /dev/null +++ b/queue_job_batch_size/README.rst @@ -0,0 +1,98 @@ +==================== +Queue Job Batch Size +==================== + +.. + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + !! This file is generated by oca-gen-addon-readme !! + !! changes will be overwritten. !! + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + !! source digest: sha256:3b2b67b2f9e534bfe1b21f54456b2c5f02088b0dbf652e26b57d9704678fef2c + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +.. |badge1| image:: https://img.shields.io/badge/maturity-Beta-yellow.png + :target: https://odoo-community.org/page/development-status + :alt: Beta +.. |badge2| image:: https://img.shields.io/badge/licence-AGPL--3-blue.png + :target: http://www.gnu.org/licenses/agpl-3.0-standalone.html + :alt: License: AGPL-3 +.. |badge3| image:: https://img.shields.io/badge/github-OCA%2Fqueue-lightgray.png?logo=github + :target: https://github.com/OCA/queue/tree/14.0/queue_job_batch_size + :alt: OCA/queue +.. |badge4| image:: https://img.shields.io/badge/weblate-Translate%20me-F47D42.png + :target: https://translation.odoo-community.org/projects/queue-14-0/queue-14-0-queue_job_batch_size + :alt: Translate me on Weblate +.. |badge5| image:: https://img.shields.io/badge/runboat-Try%20me-875A7B.png + :target: https://runboat.odoo-community.org/builds?repo=OCA/queue&target_branch=14.0 + :alt: Try me on Runboat + +|badge1| |badge2| |badge3| |badge4| |badge5| + +This module allows you to seamlessly split a big job into smaller jobs. + +It uses ``queue_job_batch`` to group the created jobs into a batch. + +Example: + +.. code-block:: python + + class ResPartner(models.Model): + # ... + + def copy_all_partners(self): + # Duplicate all partners in batches of 30: + self.with_delay(batch_size=30).copy() + + # ... + self.env['res.partner'].search([], limit=1000).copy_all_partners() + +This will create 34 jobs, each one copying 30 partners (except the last one, which will copy the remaining 10), and will group them into a single batch. + +Instead of ``batch_size``, one can also use ``batch_count`` to specify the number of batches to create. + + + +**Table of contents** + +.. contents:: + :local: + +Bug Tracker +=========== + +Bugs are tracked on `GitHub Issues `_. +In case of trouble, please check there if your issue has already been reported. +If you spotted it first, help us to smash it by providing a detailed and welcomed +`feedback `_. + +Do not contact contributors directly about support or help with technical issues. + +Credits +======= + +Authors +~~~~~~~ + +* Akretion + +Contributors +~~~~~~~~~~~~ + +* Florian Mounier + +Maintainers +~~~~~~~~~~~ + +This module is maintained by the OCA. + +.. image:: https://odoo-community.org/logo.png + :alt: Odoo Community Association + :target: https://odoo-community.org + +OCA, or the Odoo Community Association, is a nonprofit organization whose +mission is to support the collaborative development of Odoo features and +promote its widespread use. + +This module is part of the `OCA/queue `_ project on GitHub. + +You are welcome to contribute. To learn how please visit https://odoo-community.org/page/Contribute. diff --git a/queue_job_batch_size/__init__.py b/queue_job_batch_size/__init__.py new file mode 100644 index 0000000000..0650744f6b --- /dev/null +++ b/queue_job_batch_size/__init__.py @@ -0,0 +1 @@ +from . 
import models diff --git a/queue_job_batch_size/__manifest__.py b/queue_job_batch_size/__manifest__.py new file mode 100644 index 0000000000..e87d7ebbaa --- /dev/null +++ b/queue_job_batch_size/__manifest__.py @@ -0,0 +1,19 @@ +# Copyright 2023 Akretion (http://www.akretion.com). +# @author Florian Mounier +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). + +{ + "name": "Queue Job Batch Size", + "summary": "Add batch size / steps property to queue jobs to" + " automatically split them", + "version": "14.0.1.0.0", + "author": "Akretion,Odoo Community Association (OCA)", + "website": "https://github.com/OCA/queue", + "category": "Generic Modules", + "license": "AGPL-3", + "application": False, + "installable": True, + "depends": [ + "queue_job_batch", + ], +} diff --git a/queue_job_batch_size/models/__init__.py b/queue_job_batch_size/models/__init__.py new file mode 100644 index 0000000000..0e44449338 --- /dev/null +++ b/queue_job_batch_size/models/__init__.py @@ -0,0 +1 @@ +from . import base diff --git a/queue_job_batch_size/models/base.py b/queue_job_batch_size/models/base.py new file mode 100644 index 0000000000..227dc28707 --- /dev/null +++ b/queue_job_batch_size/models/base.py @@ -0,0 +1,132 @@ +# Copyright 2023 Akretion (http://www.akretion.com). +# @author Florian Mounier +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). + +import operator +from functools import reduce + +from odoo import models + +from odoo.addons.queue_job.delay import Delayable + + +class DelayableBatchRecordset(object): + __slots__ = ("delayables", "batch") + + def __init__( + self, + recordset, + priority=None, + eta=None, + max_retries=None, + description=None, + channel=None, + identity_key=None, + batch_size=None, + batch_count=None, + ): + total_records = len(recordset) + if batch_size: + batch_count = 1 + total_records // batch_size + else: + batch_size = total_records // batch_count + if total_records % batch_count: + batch_size += 1 + + description = description or "__EMPTY__" + self.batch = recordset.env["queue.job.batch"].get_new_batch( + "Batch of %s" % description + ) + self.delayables = [] + for batch in range(batch_count): + start = batch * batch_size + end = min((batch + 1) * batch_size, total_records) + if end > start: + self.delayables.append( + Delayable( + recordset[start:end].with_context(job_batch=self.batch), + priority=priority or 12, # Lower priority than default + # to let queue_job_batch check the state + eta=eta, + max_retries=max_retries, + description="%s (batch %d/%d)" + % (description, batch + 1, batch_count), + channel=channel, + identity_key=identity_key, + ) + ) + + @property + def recordset(self): + return reduce(operator.or_, self.delayables, set()).recordset + + def __getattr__(self, name): + def _delay_delayable(*args, **kwargs): + for delayable in self.delayables: + func = getattr(delayable, name) + + # FIXME: Find a better way to set default description + if "__EMPTY__" in delayable.description: + description = ( + func.__doc__.splitlines()[0].strip() + if func.__doc__ + else "{}.{}".format(delayable.recordset._name, name) + ) + delayable.description = delayable.description.replace( + "__EMPTY__", description + ) + if "__EMPTY__" in self.batch.name: + self.batch.name = self.batch.name.replace( + "__EMPTY__", description + ) + func(*args, **kwargs).delay() + self.batch.enqueue() + return [delayable._generated_job for delayable in self.delayables] + + return _delay_delayable + + def __str__(self): + recordset = self.delayables[0].recordset + 
return "DelayableBatchRecordset(%s%s)" % ( + recordset._name, + getattr(recordset, "_ids", ""), + ) + + __repr__ = __str__ + + +class Base(models.AbstractModel): + _inherit = "base" + + def with_delay( + self, + priority=None, + eta=None, + max_retries=None, + description=None, + channel=None, + identity_key=None, + batch_size=None, + batch_count=None, + ): + if batch_size or batch_count: + return DelayableBatchRecordset( + self, + priority=priority, + eta=eta, + max_retries=max_retries, + description=description, + channel=channel, + identity_key=identity_key, + batch_size=batch_size, + batch_count=batch_count, + ) + + return super().with_delay( + priority=priority, + eta=eta, + max_retries=max_retries, + description=description, + channel=channel, + identity_key=identity_key, + ) diff --git a/queue_job_batch_size/readme/CONTRIBUTORS.rst b/queue_job_batch_size/readme/CONTRIBUTORS.rst new file mode 100644 index 0000000000..8d00d9f04b --- /dev/null +++ b/queue_job_batch_size/readme/CONTRIBUTORS.rst @@ -0,0 +1 @@ +* Florian Mounier diff --git a/queue_job_batch_size/readme/DESCRIPTION.rst b/queue_job_batch_size/readme/DESCRIPTION.rst new file mode 100644 index 0000000000..391f53bb1c --- /dev/null +++ b/queue_job_batch_size/readme/DESCRIPTION.rst @@ -0,0 +1,21 @@ +This module allows to seemlessly split a big job into smaller jobs. + +It uses ``queue_job_batch`` to group the created jobs into a batch. + +Example: + +.. code-block:: python + + class ResPartner(models.Model): + # ... + + def copy_all_partners(self): + # Duplicate all partners in batches of 30: + self.with_delay(batch_size=30).copy() + + # ... + self.env['res.partner'].search([], limit=1000).copy_all_partners() + +This will create 34 jobs, each one copying 30 partners (except the last one which will copy 10) and will group them into a batch. + +Instead of ``batch_size``, one can also use ``batch_count`` to specify the number of batches to create instead. diff --git a/queue_job_batch_size/static/description/index.html b/queue_job_batch_size/static/description/index.html new file mode 100644 index 0000000000..102574be92 --- /dev/null +++ b/queue_job_batch_size/static/description/index.html @@ -0,0 +1,436 @@ + + + + + + +Queue Job Batch Size + + + +
+ + diff --git a/queue_job_batch_size/tests/__init__.py b/queue_job_batch_size/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/queue_job_batch_size/tests/test_queue_job_batch_size.py b/queue_job_batch_size/tests/test_queue_job_batch_size.py new file mode 100644 index 0000000000..a7d8575d75 --- /dev/null +++ b/queue_job_batch_size/tests/test_queue_job_batch_size.py @@ -0,0 +1,198 @@ +# Copyright 2023 Akretion (http://www.akretion.com). +# @author Florian Mounier +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). + +from odoo.tests import common, tagged + + +@tagged("post_install", "-at_install") +class TestQueueJobBatchSize(common.SavepointCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.records = ( + cls.env.ref("base.res_partner_1") + | cls.env.ref("base.res_partner_2") + | cls.env.ref("base.res_partner_3") + | cls.env.ref("base.res_partner_4") + | cls.env.ref("base.res_partner_10") + | cls.env.ref("base.res_partner_12") + | cls.env.ref("base.res_partner_18") + ) + + def test_queue_job_batch_size(self): + self.env["queue.job"].search([]).unlink() + self.env["queue.job.batch"].search([]).unlink() + self.records.with_delay( + description="Test Queue Job Batch Size", + batch_size=2, + ).name_get() + queue_jobs = self.env["queue.job"].search([]) + self.assertEqual(len(queue_jobs), 4) + self.assertEqual( + set(queue_jobs.mapped("name")), + { + "Test Queue Job Batch Size (batch 1/4)", + "Test Queue Job Batch Size (batch 2/4)", + "Test Queue Job Batch Size (batch 3/4)", + "Test Queue Job Batch Size (batch 4/4)", + }, + ) + self.assertCountEqual( + [len(job.records) for job in queue_jobs], + [2, 2, 2, 1], + ) + self.assertTrue( + all( + job1.job_batch_id == job2.job_batch_id + for job1, job2 in zip(queue_jobs, queue_jobs[1:]) + ) + ) + queue_batches = self.env["queue.job.batch"].search([]) + self.assertEqual(len(queue_batches), 1) + self.assertEqual( + queue_batches.name, + "Batch of Test Queue Job Batch Size", + ) + + def test_queue_job_batch_size_other_size_4(self): + self.env["queue.job"].search([]).unlink() + self.records.with_delay( + description="Test Queue Job Batch Size", + batch_size=4, + ).name_get() + queue_jobs = self.env["queue.job"].search([]) + self.assertEqual(len(queue_jobs), 2) + self.assertCountEqual( + [len(job.records) for job in queue_jobs], + [4, 3], + ) + + def test_queue_job_batch_size_other_size_3(self): + self.env["queue.job"].search([]).unlink() + self.records.with_delay( + description="Test Queue Job Batch Size", + batch_size=3, + ).name_get() + queue_jobs = self.env["queue.job"].search([]) + self.assertEqual(len(queue_jobs), 3) + self.assertCountEqual( + [len(job.records) for job in queue_jobs], + [3, 3, 1], + ) + + def test_queue_job_batch_size_other_size_1(self): + self.env["queue.job"].search([]).unlink() + self.records.with_delay( + description="Test Queue Job Batch Size", + batch_size=1, + ).name_get() + queue_jobs = self.env["queue.job"].search([]) + self.assertEqual(len(queue_jobs), 7) + self.assertCountEqual( + [len(job.records) for job in queue_jobs], + [1, 1, 1, 1, 1, 1, 1], + ) + + def test_queue_job_batch_size_other_size_7(self): + self.env["queue.job"].search([]).unlink() + self.records.with_delay( + description="Test Queue Job Batch Size", + batch_size=7, + ).name_get() + queue_jobs = self.env["queue.job"].search([]) + self.assertEqual(len(queue_jobs), 1) + self.assertCountEqual( + [len(job.records) for job in queue_jobs], + [7], + ) + + def test_queue_job_batch_size_other_size_15(self): + 
self.env["queue.job"].search([]).unlink() + self.records.with_delay( + description="Test Queue Job Batch Size", + batch_size=15, + ).name_get() + queue_jobs = self.env["queue.job"].search([]) + self.assertEqual(len(queue_jobs), 1) + self.assertTrue( + [len(job.records) for job in queue_jobs], + [7], + ) + + def test_queue_job_batch_size_batch_count(self): + self.env["queue.job"].search([]).unlink() + self.records.with_delay( + description="Test Queue Job Batch Size", + batch_count=4, + ).name_get() + queue_jobs = self.env["queue.job"].search([]) + self.assertEqual(len(queue_jobs), 4) + self.assertTrue( + [len(job.records) for job in queue_jobs], + [2, 2, 2, 1], + ) + + def test_queue_job_batch_size_batch_count_1(self): + self.env["queue.job"].search([]).unlink() + self.records.with_delay( + description="Test Queue Job Batch Size", + batch_count=1, + ).name_get() + queue_jobs = self.env["queue.job"].search([]) + self.assertEqual(len(queue_jobs), 1) + self.assertTrue( + [len(job.records) for job in queue_jobs], + [7], + ) + + def test_queue_job_batch_size_batch_count_7(self): + self.env["queue.job"].search([]).unlink() + self.records.with_delay( + description="Test Queue Job Batch Size", + batch_count=7, + ).name_get() + queue_jobs = self.env["queue.job"].search([]) + self.assertEqual(len(queue_jobs), 7) + self.assertTrue( + [len(job.records) for job in queue_jobs], + [1, 1, 1, 1, 1, 1, 1], + ) + + def test_queue_job_batch_size_batch_count_15(self): + self.env["queue.job"].search([]).unlink() + self.records.with_delay( + description="Test Queue Job Batch Size", + batch_count=15, + ).name_get() + queue_jobs = self.env["queue.job"].search([]) + self.assertEqual(len(queue_jobs), 7) + self.assertTrue( + [len(job.records) for job in queue_jobs], + [1, 1, 1, 1, 1, 1, 1], + ) + + def test_queue_job_batch_size_no_description(self): + self.env["queue.job"].search([]).unlink() + self.env["queue.job.batch"].search([]).unlink() + self.records.with_delay( + batch_size=2, + ).name_get() + queue_jobs = self.env["queue.job"].search([]) + self.assertEqual(len(queue_jobs), 4) + self.assertEqual( + set(queue_jobs.mapped("name")), + { + "res.partner.name_get (batch 1/4)", + "res.partner.name_get (batch 2/4)", + "res.partner.name_get (batch 3/4)", + "res.partner.name_get (batch 4/4)", + }, + ) + + queue_batches = self.env["queue.job.batch"].search([]) + self.assertEqual(len(queue_batches), 1) + self.assertEqual( + queue_batches.name, + "Batch of res.partner.name_get", + ) diff --git a/setup/queue_job_batch_size/odoo/addons/queue_job_batch_size b/setup/queue_job_batch_size/odoo/addons/queue_job_batch_size new file mode 120000 index 0000000000..c51fb5302b --- /dev/null +++ b/setup/queue_job_batch_size/odoo/addons/queue_job_batch_size @@ -0,0 +1 @@ +../../../../queue_job_batch_size \ No newline at end of file diff --git a/setup/queue_job_batch_size/setup.py b/setup/queue_job_batch_size/setup.py new file mode 100644 index 0000000000..28c57bb640 --- /dev/null +++ b/setup/queue_job_batch_size/setup.py @@ -0,0 +1,6 @@ +import setuptools + +setuptools.setup( + setup_requires=['setuptools-odoo'], + odoo_addon=True, +)