diff --git a/queue_job/README.rst b/queue_job/README.rst index f401953e0c..e6b21e2570 100644 --- a/queue_job/README.rst +++ b/queue_job/README.rst @@ -85,6 +85,36 @@ Features: .. contents:: :local: +Use Cases / Context +=================== + +Odoo treats task synchronously, like when you import a list of products +it will treat each line in one big task. "Queue job" gives you the +ability to detail big tasks in many smaller ones. + +Imagine you have a lot of data to change for thousand orders, you can do +it in one step and cause a heavy load on the server, and this may affect +the performance of Odoo. With queue_job you can divide the work in jobs +and run thousand jobs (one job for each orders). An other benefit is if +one line failed it doesn't block the processing of the others, as the +jobs are independent. Plus you can schedule the jobs and set a number of +retries. + +Here are some community usage examples: + +- Mass sending invoices: + `account_invoice_mass_sending `__ +- Import data in the background: + `base_import_async `__ +- Export data in the background: + `base_export_async `__ +- Generate contract invoices with jobs: + `contract_queue_job `__ +- Generate partner invoices with + jobs:`partner_invoicing_mode `__ +- Process the Sales Automatic Workflow actions with jobs: + `sale_automatic_workflow_job `__ + Installation ============ @@ -99,10 +129,14 @@ Configuration - ``ODOO_QUEUE_JOB_CHANNELS=root:4`` or any other channels configuration. The default is ``root:1`` - - if ``xmlrpc_port`` is not set: ``ODOO_QUEUE_JOB_PORT=8069`` - - - Start Odoo with ``--load=web,queue_job`` and ``--workers`` greater - than 1. 
[1]_ + - ``ODOO_QUEUE_JOB_PORT=8069``, default ``--http-port`` + - ``ODOO_QUEUE_JOB_SCHEME=https``, default ``http`` + - ``ODOO_QUEUE_JOB_HOST=load-balancer``, default + ``--http-interface`` or ``localhost`` if unset + - ``ODOO_QUEUE_JOB_HTTP_AUTH_USER=jobrunner``, default empty + - ``ODOO_QUEUE_JOB_HTTP_AUTH_PASSWORD=s3cr3t``, default empty + - Start Odoo with ``--load=web,queue_job`` and ``--workers`` greater + than 1. [1]_ - Using the Odoo configuration file: @@ -116,6 +150,11 @@ Configuration (...) [queue_job] channels = root:2 + scheme = https + host = load-balancer + port = 443 + http_auth_user = jobrunner + http_auth_password = s3cr3t - Confirm the runner is starting correctly by checking the odoo log file: diff --git a/queue_job/controllers/main.py b/queue_job/controllers/main.py index c867711408..a1729b1fe8 100644 --- a/queue_job/controllers/main.py +++ b/queue_job/controllers/main.py @@ -13,8 +13,8 @@ from werkzeug.exceptions import BadRequest, Forbidden from odoo import SUPERUSER_ID, api, http -from odoo.modules.registry import Registry from odoo.service.model import PG_CONCURRENCY_ERRORS_TO_RETRY +from odoo.tools import config from ..delay import chain, group from ..exception import FailedJobError, RetryableJobError @@ -38,8 +38,10 @@ def _prevent_commit(cr): def forbidden_commit(*args, **kwargs): raise RuntimeError( "Commit is forbidden in queue jobs. " - "If the current job is a cron running as queue job, " - "modify it to run as a normal cron." + 'You may want to enable the "Allow Commit" option on the Job ' + "Function. Alternatively, if the current job is a cron running as " + "queue job, you can modify it to run as a normal cron. 
More details on: " + "https://github.com/OCA/queue/wiki/Upgrade-warning:-commits-inside-jobs" ) original_commit = cr.commit @@ -103,11 +105,16 @@ def _try_perform_job(cls, env, job): job.set_done() job.store() env.flush_all() - env.cr.commit() + if not config["test_enable"]: + env.cr.commit() _logger.debug("%s done", job) @classmethod def _enqueue_dependent_jobs(cls, env, job): + if not job.should_check_dependents(): + return + + _logger.debug("%s enqueue depends started", job) tries = 0 while True: try: @@ -136,13 +143,13 @@ def _enqueue_dependent_jobs(cls, env, job): time.sleep(wait_time) else: break + _logger.debug("%s enqueue depends done", job) @classmethod def _runjob(cls, env: api.Environment, job: Job) -> None: def retry_postpone(job, message, seconds=None): job.env.clear() - with Registry(job.env.cr.dbname).cursor() as new_cr: - job.env = api.Environment(new_cr, SUPERUSER_ID, {}) + with job.in_temporary_env(): job.postpone(result=message, seconds=seconds) job.set_pending(reset_retry=False) job.store() @@ -167,6 +174,7 @@ def retry_postpone(job, message, seconds=None): # traceback in the logs we should have the traceback when all # retries are exhausted env.cr.rollback() + return except (FailedJobError, Exception) as orig_exception: buff = StringIO() @@ -174,17 +182,14 @@ def retry_postpone(job, message, seconds=None): traceback_txt = buff.getvalue() _logger.error(traceback_txt) job.env.clear() - with Registry(job.env.cr.dbname).cursor() as new_cr: - job.env = job.env(cr=new_cr) + with job.in_temporary_env(): vals = cls._get_failure_values(job, traceback_txt, orig_exception) job.set_failed(**vals) job.store() buff.close() raise - _logger.debug("%s enqueue depends started", job) cls._enqueue_dependent_jobs(env, job) - _logger.debug("%s enqueue depends done", job) @classmethod def _get_failure_values(cls, job, traceback_txt, orig_exception): @@ -229,6 +234,7 @@ def create_test_job( failure_rate=0, job_duration=0, commit_within_job=False, + 
failure_retry_seconds=0, ): if not http.request.env.user.has_group("base.group_erp_manager"): raise Forbidden(http.request.env._("Access Denied")) @@ -266,6 +272,12 @@ def create_test_job( except ValueError: max_retries = None + if failure_retry_seconds is not None: + try: + failure_retry_seconds = int(failure_retry_seconds) + except ValueError: + failure_retry_seconds = 0 + if size == 1: return self._create_single_test_job( priority=priority, @@ -275,6 +287,7 @@ def create_test_job( failure_rate=failure_rate, job_duration=job_duration, commit_within_job=commit_within_job, + failure_retry_seconds=failure_retry_seconds, ) if size > 1: @@ -287,6 +300,7 @@ def create_test_job( failure_rate=failure_rate, job_duration=job_duration, commit_within_job=commit_within_job, + failure_retry_seconds=failure_retry_seconds, ) return "" @@ -300,6 +314,7 @@ def _create_single_test_job( failure_rate=0, job_duration=0, commit_within_job=False, + failure_retry_seconds=0, ): delayed = ( http.request.env["queue.job"] @@ -313,6 +328,7 @@ def _create_single_test_job( failure_rate=failure_rate, job_duration=job_duration, commit_within_job=commit_within_job, + failure_retry_seconds=failure_retry_seconds, ) ) return f"job uuid: {delayed.db_record().uuid}" @@ -329,6 +345,7 @@ def _create_graph_test_jobs( failure_rate=0, job_duration=0, commit_within_job=False, + failure_retry_seconds=0, ): model = http.request.env["queue.job"] current_count = 0 @@ -355,6 +372,7 @@ def _create_graph_test_jobs( failure_rate=failure_rate, job_duration=job_duration, commit_within_job=commit_within_job, + failure_retry_seconds=failure_retry_seconds, ) ) diff --git a/queue_job/job.py b/queue_job/job.py index 3eca2d2661..86314499bd 100644 --- a/queue_job/job.py +++ b/queue_job/job.py @@ -8,6 +8,7 @@ import sys import uuid import weakref +from contextlib import contextmanager, nullcontext from datetime import datetime, timedelta from random import randint @@ -403,14 +404,9 @@ def __init__( raise TypeError("Job 
accepts only methods of Models") recordset = func.__self__ - env = recordset.env self.method_name = func.__name__ self.recordset = recordset - self.env = env - self.job_model = self.env["queue.job"] - self.job_model_name = "queue.job" - self.job_config = ( self.env["queue.job.function"].sudo().job_config(self.job_function_name) ) @@ -460,10 +456,10 @@ def __init__( self.exc_message = None self.exc_info = None - if "company_id" in env.context: - company_id = env.context["company_id"] + if "company_id" in self.env.context: + company_id = self.env.context["company_id"] else: - company_id = env.company.id + company_id = self.env.company.id self.company_id = company_id self._eta = None self.eta = eta @@ -488,7 +484,12 @@ def perform(self): """ self.retry += 1 try: - self.result = self.func(*tuple(self.args), **self.kwargs) + if self.job_config.allow_commit: + env_context_manager = self.in_temporary_env() + else: + env_context_manager = nullcontext() + with env_context_manager: + self.result = self.func(*tuple(self.args), **self.kwargs) except RetryableJobError as err: if err.ignore_retry: self.retry -= 1 @@ -508,6 +509,16 @@ def perform(self): return self.result + @contextmanager + def in_temporary_env(self): + with self.env.registry.cursor() as new_cr: + env = self.env + self._env = env(cr=new_cr) + try: + yield + finally: + self._env = env + def _get_common_dependent_jobs_query(self): return """ UPDATE queue_job @@ -538,6 +549,9 @@ def _get_common_dependent_jobs_query(self): AND state = %s; """ + def should_check_dependents(self): + return any(self.__reverse_depends_on_uuids) + def enqueue_waiting(self): sql = self._get_common_dependent_jobs_query() self.env.cr.execute(sql, (PENDING, self.uuid, DONE, WAIT_DEPENDENCIES)) @@ -666,6 +680,14 @@ def __hash__(self): def db_record(self): return self.db_records_from_uuids(self.env, [self.uuid]) + @property + def env(self): + return self.recordset.env + + @env.setter + def _env(self, env): + self.recordset = 
self.recordset.with_env(env) + @property def func(self): recordset = self.recordset.with_context(job_uuid=self.uuid) @@ -730,7 +752,7 @@ def model_name(self): @property def user_id(self): - return self.recordset.env.uid + return self.env.uid @property def eta(self): diff --git a/queue_job/jobrunner/runner.py b/queue_job/jobrunner/runner.py index 681d03fadf..7fd91d68ba 100644 --- a/queue_job/jobrunner/runner.py +++ b/queue_job/jobrunner/runner.py @@ -16,111 +16,7 @@ * It maintains an in-memory priority queue of jobs that is populated from the queue_job tables in all databases. * It does not run jobs itself, but asks Odoo to run them through an - anonymous ``/queue_job/runjob`` HTTP request. [1]_ - -How to use it? --------------- - -* Optionally adjust your configuration through environment variables: - - - ``ODOO_QUEUE_JOB_CHANNELS=root:4`` (or any other channels - configuration), default ``root:1``. - - ``ODOO_QUEUE_JOB_SCHEME=https``, default ``http``. - - ``ODOO_QUEUE_JOB_HOST=load-balancer``, default ``http_interface`` - or ``localhost`` if unset. - - ``ODOO_QUEUE_JOB_PORT=443``, default ``http_port`` or 8069 if unset. - - ``ODOO_QUEUE_JOB_HTTP_AUTH_USER=jobrunner``, default empty. - - ``ODOO_QUEUE_JOB_HTTP_AUTH_PASSWORD=s3cr3t``, default empty. - - ``ODOO_QUEUE_JOB_JOBRUNNER_DB_HOST=master-db``, default ``db_host`` - or ``False`` if unset. - - ``ODOO_QUEUE_JOB_JOBRUNNER_DB_PORT=5432``, default ``db_port`` - or ``False`` if unset. - - ``ODOO_QUEUE_JOB_JOBRUNNER_DB_USER=userdb``, default ``db_user`` - or ``False`` if unset. - - ``ODOO_QUEUE_JOB_JOBRUNNER_DB_PASSWORD=passdb``, default ``db_password`` - or ``False`` if unset. - -* Alternatively, configure the channels through the Odoo configuration - file, like: - -.. 
code-block:: ini - - [queue_job] - channels = root:4 - scheme = https - host = load-balancer - port = 443 - http_auth_user = jobrunner - http_auth_password = s3cr3t - jobrunner_db_host = master-db - jobrunner_db_port = 5432 - jobrunner_db_user = userdb - jobrunner_db_password = passdb - -* Or, if using ``anybox.recipe.odoo``, add this to your buildout configuration: - -.. code-block:: ini - - [odoo] - recipe = anybox.recipe.odoo - (...) - queue_job.channels = root:4 - queue_job.scheme = https - queue_job.host = load-balancer - queue_job.port = 443 - queue_job.http_auth_user = jobrunner - queue_job.http_auth_password = s3cr3t - -* Start Odoo with ``--load=web,web_kanban,queue_job`` - and ``--workers`` greater than 1 [2]_, or set the ``server_wide_modules`` - option in The Odoo configuration file: - -.. code-block:: ini - - [options] - (...) - workers = 4 - server_wide_modules = web,web_kanban,queue_job - (...) - -* Or, if using ``anybox.recipe.odoo``: - -.. code-block:: ini - - [odoo] - recipe = anybox.recipe.odoo - (...) - options.workers = 4 - options.server_wide_modules = web,web_kanban,queue_job - -* Confirm the runner is starting correctly by checking the odoo log file: - -.. code-block:: none - - ...INFO...queue_job.jobrunner.runner: starting - ...INFO...queue_job.jobrunner.runner: initializing database connections - ...INFO...queue_job.jobrunner.runner: queue job runner ready for db - ...INFO...queue_job.jobrunner.runner: database connections ready - -* Create jobs (eg using base_import_async) and observe they - start immediately and in parallel. - -* Tip: to enable debug logging for the queue job, use - ``--log-handler=odoo.addons.queue_job:DEBUG`` - -Caveat ------- - -* After creating a new database or installing queue_job on an - existing database, Odoo must be restarted for the runner to detect it. - -.. rubric:: Footnotes - -.. 
[1] From a security standpoint, it is safe to have an anonymous HTTP - request because this request only accepts to run jobs that are - enqueued. -.. [2] It works with the threaded Odoo server too, although this way - of running Odoo is obviously not for production purposes. + anonymous ``/queue_job/runjob`` HTTP request. """ import logging diff --git a/queue_job/models/queue_job.py b/queue_job/models/queue_job.py index 5f00acab57..24467aa2d6 100644 --- a/queue_job/models/queue_job.py +++ b/queue_job/models/queue_job.py @@ -13,7 +13,7 @@ from odoo.addons.base_sparse_field.models.fields import Serialized from ..delay import Graph -from ..exception import JobError +from ..exception import JobError, RetryableJobError from ..fields import JobSerialized from ..job import ( CANCELLED, @@ -339,8 +339,8 @@ def button_done(self): return True def button_cancelled(self): - # If job was set to DONE or WAIT_DEPENDENCIES, do not cancel it - states_from = (PENDING, ENQUEUED, FAILED) + # If job was set to DONE do not cancel it + states_from = (WAIT_DEPENDENCIES, PENDING, ENQUEUED, FAILED) result = self.env._("Cancelled by %s", self.env.user.name) records = self.filtered(lambda job_: job_.state in states_from) records._change_job_state(CANCELLED, result=result) @@ -358,8 +358,11 @@ def _message_post_on_failure(self): # at every job creation domain = self._subscribe_users_domain() base_users = self.env["res.users"].search(domain) + suscribe_job_creator = self._subscribe_job_creator() for record in self: - users = base_users | record.user_id + users = base_users + if suscribe_job_creator: + users |= record.user_id record.message_subscribe(partner_ids=users.mapped("partner_id").ids) msg = record._message_failed_job() if msg: @@ -376,6 +379,14 @@ def _subscribe_users_domain(self): domain.append(("company_id", "in", companies.ids)) return domain + @api.model + def _subscribe_job_creator(self): + """ + Whether the user that created the job should be subscribed to the job, + in addition 
to users determined by `_subscribe_users_domain` + """ + return True + def _message_failed_job(self): """Return a message which will be posted on the job when it is failed. @@ -458,10 +469,23 @@ def related_action_open_record(self): ) return action - def _test_job(self, failure_rate=0, job_duration=0, commit_within_job=False): + def _test_job( + self, + failure_rate=0, + job_duration=0, + commit_within_job=False, + failure_retry_seconds=0, + ): _logger.info("Running test job.") if random.random() <= failure_rate: - raise JobError("Job failed") + if failure_retry_seconds: + raise RetryableJobError( + f"Retryable job failed, will be retried in " + f"{failure_retry_seconds} seconds", + seconds=failure_retry_seconds, + ) + else: + raise JobError("Job failed") if job_duration: time.sleep(job_duration) if commit_within_job: diff --git a/queue_job/models/queue_job_function.py b/queue_job/models/queue_job_function.py index 5f86f7a214..60061b1e3b 100644 --- a/queue_job/models/queue_job_function.py +++ b/queue_job/models/queue_job_function.py @@ -28,7 +28,8 @@ class QueueJobFunction(models.Model): "related_action_enable " "related_action_func_name " "related_action_kwargs " - "job_function_id ", + "job_function_id " + "allow_commit", ) def _default_channel(self): @@ -79,6 +80,12 @@ def _default_channel(self): "enable, func_name, kwargs.\n" "See the module description for details.", ) + allow_commit = fields.Boolean( + help="Allows the job to commit transactions during execution. " + "Under the hood, this executes the job in a new database cursor, " + "which incurs an overhead as it requires an extra connection to " + "the database. 
" + ) @api.depends("model_id.model", "method") def _compute_name(self): @@ -151,6 +158,7 @@ def job_default_config(self): related_action_func_name=None, related_action_kwargs={}, job_function_id=None, + allow_commit=False, ) def _parse_retry_pattern(self): @@ -186,6 +194,7 @@ def job_config(self, name): related_action_func_name=config.related_action.get("func_name"), related_action_kwargs=config.related_action.get("kwargs", {}), job_function_id=config.id, + allow_commit=config.allow_commit, ) def _retry_pattern_format_error_message(self): diff --git a/queue_job/readme/CONFIGURE.md b/queue_job/readme/CONFIGURE.md index 216b5358af..7239106218 100644 --- a/queue_job/readme/CONFIGURE.md +++ b/queue_job/readme/CONFIGURE.md @@ -2,9 +2,14 @@ - Adjust environment variables (optional): - `ODOO_QUEUE_JOB_CHANNELS=root:4` or any other channels configuration. The default is `root:1` - - if `xmlrpc_port` is not set: `ODOO_QUEUE_JOB_PORT=8069` - - Start Odoo with `--load=web,queue_job` and `--workers` greater than - 1.[^1] + - `ODOO_QUEUE_JOB_PORT=8069`, default `--http-port` + - `ODOO_QUEUE_JOB_SCHEME=https`, default `http` + - `ODOO_QUEUE_JOB_HOST=load-balancer`, default `--http-interface` + or `localhost` if unset + - `ODOO_QUEUE_JOB_HTTP_AUTH_USER=jobrunner`, default empty + - `ODOO_QUEUE_JOB_HTTP_AUTH_PASSWORD=s3cr3t`, default empty + - Start Odoo with `--load=web,queue_job` and `--workers` greater than + 1.[^1] - Using the Odoo configuration file: ``` ini @@ -16,6 +21,11 @@ server_wide_modules = web,queue_job (...) 
[queue_job] channels = root:2 +scheme = https +host = load-balancer +port = 443 +http_auth_user = jobrunner +http_auth_password = s3cr3t ``` - Confirm the runner is starting correctly by checking the odoo log diff --git a/queue_job/readme/CONTEXT.md b/queue_job/readme/CONTEXT.md new file mode 100644 index 0000000000..ca9cda79d6 --- /dev/null +++ b/queue_job/readme/CONTEXT.md @@ -0,0 +1,15 @@ +Odoo treats task synchronously, like when you import a list of products it will treat each line in one big task. +"Queue job" gives you the ability to detail big tasks in many smaller ones. + +Imagine you have a lot of data to change for thousand orders, you can do it in one step and cause a heavy load on the server, and this may affect the performance of Odoo. With queue_job you can divide the work in jobs and run thousand jobs (one job for each orders). +An other benefit is if one line failed it doesn't block the processing of the others, as the jobs are independent. +Plus you can schedule the jobs and set a number of retries. 
+ +Here are some community usage examples: + +* Mass sending invoices: [account_invoice_mass_sending](https://github.com/OCA/account-invoicing/tree/17.0/account_invoice_mass_sending) +* Import data in the background: [base_import_async](https://github.com/OCA/queue/tree/17.0/base_import_async) +* Export data in the background: [base_export_async](https://github.com/OCA/queue/tree/17.0/base_export_async) +* Generate contract invoices with jobs: [contract_queue_job](https://github.com/OCA/contract/tree/17.0/contract_queue_job) +* Generate partner invoices with jobs:[partner_invoicing_mode](https://github.com/OCA/account-invoicing/tree/17.0/partner_invoicing_mode) +* Process the Sales Automatic Workflow actions with jobs: [sale_automatic_workflow_job](https://github.com/OCA/sale-workflow/tree/17.0/sale_automatic_workflow_job) diff --git a/queue_job/static/description/index.html b/queue_job/static/description/index.html index c54c9d9c00..3b8501c2cd 100644 --- a/queue_job/static/description/index.html +++ b/queue_job/static/description/index.html @@ -420,53 +420,87 @@

Job Queue

Table of contents

+
+

Use Cases / Context

+

Odoo treats tasks synchronously, like when you import a list of products +it will treat each line in one big task. “Queue job” gives you the +ability to split big tasks into many smaller ones.

+

Imagine you have a lot of data to change for thousands of orders, you can do +it in one step and cause a heavy load on the server, and this may affect +the performance of Odoo. With queue_job you can divide the work in jobs +and run thousands of jobs (one job for each order). Another benefit is if +one line fails it doesn’t block the processing of the others, as the +jobs are independent. Plus you can schedule the jobs and set a number of +retries.

+

Here are some community usage examples:

+ +
-

Installation

+

Installation

Be sure to have the requests library.

-

Configuration

+

Configuration

  • Using environment variables and command line:
    • Adjust environment variables (optional):
      • ODOO_QUEUE_JOB_CHANNELS=root:4 or any other channels configuration. The default is root:1
      • -
      • if xmlrpc_port is not set: ODOO_QUEUE_JOB_PORT=8069
      • -
      -
    • +
    • ODOO_QUEUE_JOB_PORT=8069, default --http-port
    • +
    • ODOO_QUEUE_JOB_SCHEME=https, default http
    • +
    • ODOO_QUEUE_JOB_HOST=load-balancer, default +--http-interface or localhost if unset
    • +
    • ODOO_QUEUE_JOB_HTTP_AUTH_USER=jobrunner, default empty
    • +
    • ODOO_QUEUE_JOB_HTTP_AUTH_PASSWORD=s3cr3t, default empty
    • Start Odoo with --load=web,queue_job and --workers greater than 1. [1]
  • +
+
  • Using the Odoo configuration file:
  • @@ -477,7 +511,12 @@ 

    Configuration

    (...) [queue_job] -channels = root:2 +channels = root:2 +scheme = https +host = load-balancer +port = 443 +http_auth_user = jobrunner +http_auth_password = s3cr3t
    • Confirm the runner is starting correctly by checking the odoo log @@ -507,15 +546,15 @@

      Configuration

    -

    Usage

    +

    Usage

    To use this module, you need to:

    1. Go to Job Queue menu
    -

    Developers

    +

    Developers

    -

    Delaying jobs

    +

    Delaying jobs

    The fast way to enqueue a job for a method is to use with_delay() on a record or model:

    @@ -635,7 +674,7 @@ 

    Delaying jobs

    -

    Enqueing Job Options

    +

    Enqueing Job Options

    • priority: default is 10, the closest it is to 0, the faster it will be executed
    • @@ -654,7 +693,7 @@

      Enqueing Job Options

    -

    Configure default options for jobs

    +

    Configure default options for jobs

    In earlier versions, jobs could be configured using the @job decorator. This is now obsolete, they can be configured using optional queue.job.function and queue.job.channel XML records.

    @@ -782,7 +821,7 @@

    Configure default options for job delaying any jobs.

    -

    Testing

    +

    Testing

    Asserting enqueued jobs

    The recommended way to test jobs, rather than running them directly and synchronously is to split the tests in two parts:

    @@ -897,7 +936,7 @@

    Testing

    synchronously

    -

    Patterns

    +

    Patterns

    Through the time, two main patterns emerged:

    1. For data exposed to users, a model should store the data and the @@ -924,16 +963,16 @@

      Patterns

    -

    Known issues / Roadmap

    +

    Known issues / Roadmap

    • After creating a new database or installing queue_job on an existing database, Odoo must be restarted for the runner to detect it.
    -

    Changelog

    +

    Changelog

    -

    Bug Tracker

    +

    Bug Tracker

    Bugs are tracked on GitHub Issues. In case of trouble, please check there if your issue has already been reported. If you spotted it first, help us to smash it by providing a detailed and welcomed @@ -952,16 +991,16 @@

    Bug Tracker

    Do not contact contributors directly about support or help with technical issues.

    -

    Credits

    +

    Credits

    -

    Authors

    +

    Authors

    • Camptocamp
    • ACSONE SA/NV
    -

    Contributors

    +

    Contributors

    -

    Other credits

    +

    Other credits

    The migration of this module from 17.0 to 18.0 was financially supported by Camptocamp.

    -

    Maintainers

    +

    Maintainers

    This module is maintained by the OCA.

    Odoo Community Association diff --git a/queue_job/tests/common.py b/queue_job/tests/common.py index 318f437098..f74e4ad651 100644 --- a/queue_job/tests/common.py +++ b/queue_job/tests/common.py @@ -274,7 +274,7 @@ def _add_job(self, *args, **kwargs): def _prepare_context(self, job): # pylint: disable=context-overridden - job_model = job.job_model.with_context({}) + job_model = job.env["queue.job"].with_context({}) field_records = job_model._fields["records"] # Filter the context to simulate store/load of the job job.recordset = field_records.convert_to_write(job.recordset, job_model) diff --git a/queue_job/tests/test_json_field.py b/queue_job/tests/test_json_field.py index 76bb59c977..23974e23c9 100644 --- a/queue_job/tests/test_json_field.py +++ b/queue_job/tests/test_json_field.py @@ -45,9 +45,13 @@ def test_encoder_recordset(self): "model": "res.partner", "ids": [partner.id], "su": False, - "context": expected_context, } - self.assertEqual(json.loads(value_json), expected) + result_dict = json.loads(value_json) + result_context = result_dict.pop("context") + self.assertEqual(result_dict, expected) + # context is tested separately as the order/amount of keys is not guaranteed + for key in result_context: + self.assertEqual(result_context[key], expected_context[key]) def test_encoder_recordset_list(self): demo_user = self.demo_user @@ -69,7 +73,20 @@ def test_encoder_recordset_list(self): "context": expected_context, }, ] - self.assertEqual(json.loads(value_json), expected) + result_dict = json.loads(value_json) + for result_value, expected_value in zip(result_dict, expected, strict=False): + if isinstance(expected_value, dict): + for key in result_value: + if key == "context": + for context_key in result_value["context"]: + self.assertEqual( + result_value["context"][context_key], + expected_value["context"][context_key], + ) + else: + self.assertEqual(result_value[key], expected_value[key]) + else: + self.assertEqual(result_value, expected_value) def 
test_decoder_recordset(self): demo_user = self.demo_user diff --git a/queue_job/tests/test_model_job_function.py b/queue_job/tests/test_model_job_function.py index 84676fdb65..9095f2a55e 100644 --- a/queue_job/tests/test_model_job_function.py +++ b/queue_job/tests/test_model_job_function.py @@ -42,6 +42,7 @@ def test_function_job_config(self): ' "func_name": "related_action_foo",' ' "kwargs": {"b": 1}}' ), + "allow_commit": True, } ) self.assertEqual( @@ -53,5 +54,6 @@ def test_function_job_config(self): related_action_func_name="related_action_foo", related_action_kwargs={"b": 1}, job_function_id=job_function.id, + allow_commit=True, ), ) diff --git a/queue_job/tests/test_run_rob_controller.py b/queue_job/tests/test_run_rob_controller.py index bb63bc82ec..1a15f4363a 100644 --- a/queue_job/tests/test_run_rob_controller.py +++ b/queue_job/tests/test_run_rob_controller.py @@ -15,3 +15,9 @@ def test_get_failure_values(self): self.assertEqual( rslt, {"exc_info": "info", "exc_name": "Exception", "exc_message": "zero"} ) + + def test_runjob_success(self): + job = self.env["queue.job"].with_delay()._test_job() + RunJobController._runjob(self.env, job) + self.assertEqual(job.state, "done") + self.assertEqual(job.db_record().state, "done") diff --git a/queue_job/tests/test_wizards.py b/queue_job/tests/test_wizards.py index 7738836d2f..54718f658a 100644 --- a/queue_job/tests/test_wizards.py +++ b/queue_job/tests/test_wizards.py @@ -69,18 +69,13 @@ def test_04_requeue_forbidden(self): def test_05_cancel_forbidden(self): wizard = self._wizard("queue.jobs.to.cancelled") - # State WAIT_DEPENDENCIES is not cancelled - self.job.state = "wait_dependencies" - wizard.set_cancelled() - self.assertEqual(self.job.state, "wait_dependencies") - # State DONE is not cancelled self.job.state = "done" wizard.set_cancelled() self.assertEqual(self.job.state, "done") - # State PENDING, ENQUEUED or FAILED will be cancelled - for test_state in ("pending", "enqueued"): + # State PENDING, ENQUEUED, 
WAIT_DEPENDENCIES or FAILED will be cancelled + for test_state in ("pending", "enqueued", "wait_dependencies", "failed"): self.job.state = test_state wizard.set_cancelled() self.assertEqual(self.job.state, "cancelled") @@ -99,7 +94,7 @@ def test_06_done_forbidden(self): self.assertEqual(self.job.state, "cancelled") # State WAIT_DEPENDENCIES, PENDING, ENQUEUED or FAILED will be set to DONE - for test_state in ("wait_dependencies", "pending", "enqueued"): + for test_state in ("wait_dependencies", "pending", "enqueued", "failed"): self.job.state = test_state wizard.set_done() self.assertEqual(self.job.state, "done") diff --git a/queue_job/views/queue_job_function_views.xml b/queue_job/views/queue_job_function_views.xml index 96f33bb09e..ca481f5777 100644 --- a/queue_job/views/queue_job_function_views.xml +++ b/queue_job/views/queue_job_function_views.xml @@ -10,6 +10,7 @@ + @@ -24,6 +25,7 @@ + diff --git a/queue_job/views/queue_job_views.xml b/queue_job/views/queue_job_views.xml index e0e35816b7..83a11498da 100644 --- a/queue_job/views/queue_job_views.xml +++ b/queue_job/views/queue_job_views.xml @@ -24,7 +24,7 @@ />