From 5c8cd4cf865ddaff49574f649eff1957d537a85e Mon Sep 17 00:00:00 2001 From: Christy O'Reilly Date: Fri, 2 Dec 2011 14:54:21 +0000 Subject: [PATCH 001/102] Improve logging --- pyres/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyres/worker.py b/pyres/worker.py index 8a9f2b1..13e7143 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -224,7 +224,7 @@ def reserve(self, timeout=10): logger.debug('checking queues %s' % self.queues) job = self.job_class.reserve(self.queues, self.resq, self.__str__(), timeout=timeout) if job: - logger.info('Found job on %s' % job._queue) + logger.info('Found job on %s: %s' % (job._queue, job)) return job def working_on(self, job): From 7867e342011ae7559636a56929c2384b1a200a88 Mon Sep 17 00:00:00 2001 From: Joe Shaw Date: Tue, 6 Dec 2011 13:32:57 -0500 Subject: [PATCH 002/102] add enqueue & perform timestamps, add job hooks for accessing them Classes which implement the perform() method can now also implement before_perform() and after_perform() methods which take a metadata dict. These contain the args passed in, as well as timestamps for when the job was enqueued and when it was performed, and whether the job failed and was retried. 
--- pyres/__init__.py | 9 ++++++--- pyres/job.py | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index fbf5e09..c762778 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -212,7 +212,8 @@ def enqueue(self, klass, *args): queue = getattr(klass,'queue', None) if queue: class_name = '%s.%s' % (klass.__module__, klass.__name__) - self.push(queue, {'class':class_name,'args':args}) + self.push(queue, {'class':class_name,'args':args, + 'enqueue_timestamp':time.time()}) logging.info("enqueued '%s' job on queue %s" % (class_name, queue)) if args: logging.debug("job arguments: %s" % str(args)) @@ -222,7 +223,8 @@ def enqueue(self, klass, *args): logging.warning("unable to enqueue job with class %s" % str(klass)) def enqueue_from_string(self, klass_as_string, queue, *args, **kwargs): - payload = {'class':klass_as_string, 'queue': queue, 'args':args} + payload = {'class':klass_as_string, 'queue': queue, 'args':args, + 'enqueue_timestamp':time.time()} if 'first_attempt' in kwargs: payload['first_attempt'] = kwargs['first_attempt'] self.push(queue, payload) @@ -356,7 +358,8 @@ def _enqueue(cls, klass, *args): _self = cls() if queue: class_name = '%s.%s' % (klass.__module__, klass.__name__) - _self.push(queue, {'class':class_name,'args':args}) + _self.push(queue, {'class':class_name,'args':args, + 'enqueue_timestamp': time.time()}) @staticmethod def _current_time(): diff --git a/pyres/job.py b/pyres/job.py index be329fd..dd20c76 100644 --- a/pyres/job.py +++ b/pyres/job.py @@ -1,3 +1,4 @@ +import time from datetime import timedelta from pyres import ResQ, safe_str_to_class from pyres import failure @@ -31,6 +32,8 @@ def __init__(self, queue, payload, resq, worker=None): self.resq = resq self._worker = worker + self.enqueue_timestamp = self._payload.get("enqueue_timestamp") + # Set the default back end, jobs can override when we import them # inside perform(). 
failure.backend = RedisBackend @@ -43,6 +46,20 @@ def perform(self): """This method converts payload into args and calls the ``perform`` method on the payload class. + Before calling ``perform``, a ``before_perform`` class method + is called, if it exists. It takes a dictionary as an argument; + currently the only things stored on the dictionary are the + args passed into ``perform`` and a timestamp of when the job + was enqueued. + + Similarly, an ``after_perform`` class method is called after + ``perform`` is finished. The metadata dictionary contains the + same data, plus a timestamp of when the job was performed, a + ``failed`` boolean value, and if it did fail, a ``retried`` + boolean value. This method is called after retry, and is + called regardless of whether an exception is ultimately thrown + by the perform method. + #@ add entry_point loading """ @@ -51,11 +68,29 @@ def perform(self): payload_class.resq = self.resq args = self._payload.get("args") + metadata = dict(args=args) + if self.enqueue_timestamp: + metadata["enqueue_timestamp"] = self.enqueue_timestamp + + before_perform = getattr(payload_class, "before_perform", None) + if before_perform: + before_perform(metadata) + + metadata["failed"] = False + metadata["perform_timestamp"] = time.time() try: return payload_class.perform(*args) except: + metadata["failed"] = True if not self.retry(payload_class, args): + metadata["retried"] = False raise + else: + metadata["retried"] = True + finally: + after_perform = getattr(payload_class, "after_perform", None) + if after_perform: + after_perform(metadata) def fail(self, exception): """This method provides a way to fail a job and will use whatever From 63b383793ad5ad5a74b27d40d65300f21f19d6a9 Mon Sep 17 00:00:00 2001 From: "James M. 
Henderson" Date: Tue, 17 Apr 2012 23:17:05 -0400 Subject: [PATCH 003/102] Updated views to work with pystache 0.5.0 --- requirements.txt | 2 +- resweb/views.py | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index 1a7b5c6..0ec06fd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ simplejson==2.0.9 itty==0.6.2 redis>=1.34.1 -pystache==0.1.0 +pystache==0.5.0 setproctitle>=1.0 diff --git a/resweb/views.py b/resweb/views.py index 3dd8a63..c3407fa 100644 --- a/resweb/views.py +++ b/resweb/views.py @@ -7,12 +7,16 @@ import datetime TEMPLATE_PATH = os.path.join(os.path.dirname(__file__), 'templates') -class ResWeb(pystache.View): +class ResWeb(pystache.TemplateSpec): template_path = TEMPLATE_PATH + renderer = pystache.Renderer(search_dirs = template_path) + def __init__(self, host): - super(ResWeb, self).__init__() self.resq = host + def render(self): + return self.renderer.render(self) + def media_folder(self): return '/media/' @@ -122,6 +126,7 @@ def empty_workers(self): return False else: return True + class Queues(Overview): template_name = 'queue_full' From 43459f0b1cbf593ae2a8248f8fea53f2b14e6f05 Mon Sep 17 00:00:00 2001 From: Bernardo Heynemann Date: Thu, 26 Apr 2012 15:39:49 -0300 Subject: [PATCH 004/102] Added support for timing out workers --- pyres/worker.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/pyres/worker.py b/pyres/worker.py index 8a9f2b1..c064839 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -153,13 +153,26 @@ def work(self, interval=5): datetime.datetime.now())) try: - os.waitpid(self.child, 0) + start = datetime.datetime.now() + result = (0, 0) + timed_out = False + + # waits for the result or times out + while result == (0, 0) and not timed_out: + result = os.waitpid(self.child, os.WNOHANG) + now = datetime.datetime.now() + + if self.timeout and ((now - start).seconds > self.timeout): + os.kill(self.child, 
signal.SIGKILL) + os.waitpid(-1, os.WNOHANG) + timed_out = True + except OSError as ose: import errno if ose.errno != errno.EINTR: raise ose - #os.wait() + logger.debug('done waiting') else: self._setproctitle("Processing %s since %s" % @@ -283,8 +296,9 @@ def worker_pids(self): grep pyres_worker").split("\n")) @classmethod - def run(cls, queues, server="localhost:6379", interval=None): + def run(cls, queues, server="localhost:6379", interval=None, timeout=None): worker = cls(queues=queues, server=server) + worker.timeout = timeout if interval is not None: worker.work(interval) else: From 75760da8d726a84cd85d599a3863c6c7d362d4eb Mon Sep 17 00:00:00 2001 From: Bernardo Heynemann Date: Thu, 26 Apr 2012 15:51:03 -0300 Subject: [PATCH 005/102] Script allow passing of timeout --- pyres/scripts.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyres/scripts.py b/pyres/scripts.py index fe65fd4..0debfbf 100644 --- a/pyres/scripts.py +++ b/pyres/scripts.py @@ -106,6 +106,7 @@ def pyres_worker(): parser.add_option('-l', '--log-level', dest='log_level', default='info', help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.') parser.add_option('-f', dest='logfile', help='If present, a logfile will be used. 
"stderr", "stdout", and "syslog" are all special values.') parser.add_option('-p', dest='pidfile', help='If present, a pidfile will be used.') + parser.add_option("-t", '--timeout', dest='timeout', default=None, help='the timeout in seconds for this worker') (options,args) = parser.parse_args() if len(args) != 1: @@ -120,6 +121,8 @@ def pyres_worker(): if interval is not None: interval = int(interval) + timeout = options.timeout is None and options.timeout or int(options.timeout) + queues = args[0].split(',') server = '%s:%s' % (options.host,options.port) - Worker.run(queues, server, interval) + Worker.run(queues, server, interval, timeout=timeout) From 551b47bee951d3f9e2f9c4fdecf8061ad4b346f8 Mon Sep 17 00:00:00 2001 From: Cezar Sa Espinola Date: Fri, 27 Apr 2012 14:13:37 -0300 Subject: [PATCH 006/102] Adding job to the error queue after a timeout. --- pyres/exceptions.py | 3 +++ pyres/worker.py | 19 ++++++++++++++----- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/pyres/exceptions.py b/pyres/exceptions.py index 6269e81..0ca1f2f 100644 --- a/pyres/exceptions.py +++ b/pyres/exceptions.py @@ -1,2 +1,5 @@ class NoQueueError(Exception): pass + +class TimeoutError(RuntimeError): + pass \ No newline at end of file diff --git a/pyres/worker.py b/pyres/worker.py index c064839..3cfd9fc 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -6,7 +6,7 @@ import commands import random -from pyres.exceptions import NoQueueError +from pyres.exceptions import NoQueueError, TimeoutError from pyres.job import Job from pyres import ResQ, Stat, __version__ @@ -155,17 +155,19 @@ def work(self, interval=5): try: start = datetime.datetime.now() result = (0, 0) - timed_out = False # waits for the result or times out - while result == (0, 0) and not timed_out: + while True: result = os.waitpid(self.child, os.WNOHANG) - now = datetime.datetime.now() + if result != (0, 0): + break + time.sleep(0.5) + now = datetime.datetime.now() if self.timeout and ((now - start).seconds > 
self.timeout): os.kill(self.child, signal.SIGKILL) os.waitpid(-1, os.WNOHANG) - timed_out = True + raise TimeoutError("Timed out after %d seconds" % self.timeout) except OSError as ose: import errno @@ -173,6 +175,13 @@ def work(self, interval=5): if ose.errno != errno.EINTR: raise ose + except TimeoutError as e: + exceptionType, exceptionValue, exceptionTraceback = sys.exc_info() + logger.exception("%s timed out: %s" % (job, e)) + job.fail(exceptionTraceback) + self.failed() + self.done_working() + logger.debug('done waiting') else: self._setproctitle("Processing %s since %s" % From 921db924f5003d8ed7d8ec367391e6b57fdf7348 Mon Sep 17 00:00:00 2001 From: Cezar Sa Espinola Date: Fri, 27 Apr 2012 14:22:09 -0300 Subject: [PATCH 007/102] Small cleanup --- pyres/worker.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pyres/worker.py b/pyres/worker.py index 3cfd9fc..60a2975 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -154,7 +154,6 @@ def work(self, interval=5): try: start = datetime.datetime.now() - result = (0, 0) # waits for the result or times out while True: From a297dba0f8429f6561d85473b0d53317b19e0a91 Mon Sep 17 00:00:00 2001 From: Cezar Sa Espinola Date: Fri, 27 Apr 2012 15:49:14 -0300 Subject: [PATCH 008/102] Adding basic test for workers with timeout values --- pyres/worker.py | 6 +++--- tests/__init__.py | 8 ++++++++ tests/test_worker.py | 19 ++++++++++++++++++- 3 files changed, 29 insertions(+), 4 deletions(-) diff --git a/pyres/worker.py b/pyres/worker.py index 60a2975..49ca1b9 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -25,13 +25,14 @@ class and passes a comma-separated list of queues to listen on.:: job_class = Job - def __init__(self, queues=(), server="localhost:6379", password=None): + def __init__(self, queues=(), server="localhost:6379", password=None, timeout=None): self.queues = queues self.validate_queues() self._shutdown = False self.child = None self.pid = os.getpid() self.hostname = os.uname()[1] + self.timeout = timeout 
if isinstance(server, basestring): self.resq = ResQ(server=server, password=password) @@ -305,8 +306,7 @@ def worker_pids(self): @classmethod def run(cls, queues, server="localhost:6379", interval=None, timeout=None): - worker = cls(queues=queues, server=server) - worker.timeout = timeout + worker = cls(queues=queues, server=server, timeout=timeout) if interval is not None: worker.work(interval) else: diff --git a/tests/__init__.py b/tests/__init__.py index f8b403a..bd76bbf 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -37,6 +37,14 @@ def perform(fail_until): else: return True +class TimeoutJob(object): + queue = 'basic' + + @staticmethod + def perform(wait_for): + import time + time.sleep(wait_for) + return "Done Sleeping" class TestProcess(object): queue = 'high' diff --git a/tests/test_worker.py b/tests/test_worker.py index d1903a3..e5a58a3 100644 --- a/tests/test_worker.py +++ b/tests/test_worker.py @@ -1,4 +1,4 @@ -from tests import PyResTests, Basic, TestProcess, ErrorObject, RetryOnExceptionJob +from tests import PyResTests, Basic, TestProcess, ErrorObject, RetryOnExceptionJob, TimeoutJob from pyres import ResQ from pyres.job import Job from pyres.scheduler import Scheduler @@ -202,6 +202,23 @@ def test_retry_on_exception(self): assert True == worker.process() assert worker.get_failed() == 0 + def test_kills_stale_workers_after_timeout(self): + import signal + timeout = 1 + + worker = Worker(['basic'], timeout=timeout) + self.resq.enqueue(TimeoutJob, timeout + 1) + + child = os.fork() + if child: + assert worker.get_failed() == 0 + time.sleep(timeout + 2) + os.kill(child, signal.SIGKILL) + os.waitpid(-1, os.WNOHANG) + assert worker.get_failed() == 1 + else: + worker.work() + def test_retries_give_up_eventually(self): now = datetime.datetime.now() self.set_current_time(now) From db04b97b1ac68c0c1f41e013a0ff6f1a3bcce984 Mon Sep 17 00:00:00 2001 From: Bernardo Heynemann Date: Tue, 1 May 2012 23:16:31 -0300 Subject: [PATCH 009/102] Updated as 
proposed in the previous pull request. --- pyres/scripts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyres/scripts.py b/pyres/scripts.py index 0debfbf..7302b82 100644 --- a/pyres/scripts.py +++ b/pyres/scripts.py @@ -121,7 +121,7 @@ def pyres_worker(): if interval is not None: interval = int(interval) - timeout = options.timeout is None and options.timeout or int(options.timeout) + timeout = options.timeout and int(options.timeout) queues = args[0].split(',') server = '%s:%s' % (options.host,options.port) From 8762b63b30210b87c0b1bc8505897d5b1056482d Mon Sep 17 00:00:00 2001 From: Cezar Sa Espinola Date: Sat, 5 May 2012 17:10:30 -0300 Subject: [PATCH 010/102] Splitting the Worker.work in two methods, no behavior changes. The purpose of this change was to make testing easier, but it also helps with making the code easier to read. --- pyres/worker.py | 127 ++++++++++++++++++++++++++---------------------- 1 file changed, 68 insertions(+), 59 deletions(-) diff --git a/pyres/worker.py b/pyres/worker.py index 49ca1b9..0e40143 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -128,8 +128,6 @@ def work(self, interval=5): that job to make sure another worker won't run it, then *forks* itself to work on that job. - Finally, the ``process`` method actually processes the job by eventually calling the Job instance's ``perform`` method. 
- """ self._setproctitle("Starting") self.startup() @@ -142,63 +140,7 @@ def work(self, interval=5): job = self.reserve(interval) if job: - logger.debug('picked up job') - logger.debug('job details: %s' % job) - self.before_fork(job) - self.child = os.fork() - if self.child: - self._setproctitle("Forked %s at %s" % - (self.child, - datetime.datetime.now())) - logger.info('Forked %s at %s' % (self.child, - datetime.datetime.now())) - - try: - start = datetime.datetime.now() - - # waits for the result or times out - while True: - result = os.waitpid(self.child, os.WNOHANG) - if result != (0, 0): - break - time.sleep(0.5) - - now = datetime.datetime.now() - if self.timeout and ((now - start).seconds > self.timeout): - os.kill(self.child, signal.SIGKILL) - os.waitpid(-1, os.WNOHANG) - raise TimeoutError("Timed out after %d seconds" % self.timeout) - - except OSError as ose: - import errno - - if ose.errno != errno.EINTR: - raise ose - - except TimeoutError as e: - exceptionType, exceptionValue, exceptionTraceback = sys.exc_info() - logger.exception("%s timed out: %s" % (job, e)) - job.fail(exceptionTraceback) - self.failed() - self.done_working() - - logger.debug('done waiting') - else: - self._setproctitle("Processing %s since %s" % - (job._queue, - datetime.datetime.now())) - logger.info('Processing %s since %s' % - (job._queue, datetime.datetime.now())) - self.after_fork(job) - - # re-seed the Python PRNG after forking, otherwise - # all job process will share the same sequence of - # random numbers - random.seed() - - self.process(job) - os._exit(0) - self.child = None + self.fork_worker(job) else: if interval == 0: break @@ -207,6 +149,73 @@ def work(self, interval=5): #time.sleep(interval) self.unregister_worker() + def fork_worker(self, job): + """Invoked by ``work`` method. ``fork_worker`` does the actual forking to create the child + process that will process the job. It's also responsible for monitoring the child process + and handling hangs and crashes. 
+ + Finally, the ``process`` method actually processes the job by eventually calling the Job + instance's ``perform`` method. + + """ + logger.debug('picked up job') + logger.debug('job details: %s' % job) + self.before_fork(job) + self.child = os.fork() + if self.child: + self._setproctitle("Forked %s at %s" % + (self.child, + datetime.datetime.now())) + logger.info('Forked %s at %s' % (self.child, + datetime.datetime.now())) + + try: + start = datetime.datetime.now() + + # waits for the result or times out + while True: + result = os.waitpid(self.child, os.WNOHANG) + if result != (0, 0): + break + time.sleep(0.5) + + now = datetime.datetime.now() + if self.timeout and ((now - start).seconds > self.timeout): + os.kill(self.child, signal.SIGKILL) + os.waitpid(-1, os.WNOHANG) + raise TimeoutError("Timed out after %d seconds" % self.timeout) + + except OSError as ose: + import errno + + if ose.errno != errno.EINTR: + raise ose + + except TimeoutError as e: + exceptionType, exceptionValue, exceptionTraceback = sys.exc_info() + logger.exception("%s timed out: %s" % (job, e)) + job.fail(exceptionTraceback) + self.failed() + self.done_working() + + logger.debug('done waiting') + else: + self._setproctitle("Processing %s since %s" % + (job._queue, + datetime.datetime.now())) + logger.info('Processing %s since %s' % + (job._queue, datetime.datetime.now())) + self.after_fork(job) + + # re-seed the Python PRNG after forking, otherwise + # all job process will share the same sequence of + # random numbers + random.seed() + + self.process(job) + os._exit(0) + self.child = None + def before_fork(self, job): """ hook for making changes immediately before forking to process From eb2cbf2c33533eef770520be444d3f26df682f1d Mon Sep 17 00:00:00 2001 From: Cezar Sa Espinola Date: Sat, 5 May 2012 17:19:54 -0300 Subject: [PATCH 011/102] Adding test to check whether pyres identifies a crashed process as a failure (currently failing). 
The biggest problem shown by this test is the fact that, even after the child process has crashed and the worker is ready to process new jobs, the entry associating the job with the worker remains on Redis. It also refactors the test for timeout errors on stale workers, making it simpler. --- tests/__init__.py | 10 ++++++++++ tests/test_worker.py | 27 ++++++++++++++++----------- 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index bd76bbf..5f5a3f5 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -46,6 +46,16 @@ def perform(wait_for): time.sleep(wait_for) return "Done Sleeping" +class CrashJob(object): + queue = 'basic' + + @staticmethod + def perform(): + # Dangerous, this will cause a hard crash of the python process + import ctypes + ctypes.string_at(1) + return "Never got here" + class TestProcess(object): queue = 'high' diff --git a/tests/test_worker.py b/tests/test_worker.py index e5a58a3..222545b 100644 --- a/tests/test_worker.py +++ b/tests/test_worker.py @@ -1,4 +1,4 @@ -from tests import PyResTests, Basic, TestProcess, ErrorObject, RetryOnExceptionJob, TimeoutJob +from tests import PyResTests, Basic, TestProcess, ErrorObject, RetryOnExceptionJob, TimeoutJob, CrashJob from pyres import ResQ from pyres.job import Job from pyres.scheduler import Scheduler @@ -203,21 +203,26 @@ def test_retry_on_exception(self): assert worker.get_failed() == 0 def test_kills_stale_workers_after_timeout(self): - import signal timeout = 1 worker = Worker(['basic'], timeout=timeout) self.resq.enqueue(TimeoutJob, timeout + 1) - child = os.fork() - if child: - assert worker.get_failed() == 0 - time.sleep(timeout + 2) - os.kill(child, signal.SIGKILL) - os.waitpid(-1, os.WNOHANG) - assert worker.get_failed() == 1 - else: - worker.work() + assert worker.get_failed() == 0 + worker.fork_worker(worker.reserve()) + assert worker.get_failed() == 1 + + def test_detect_crashed_workers_as_failures(self): + worker = 
Worker(['basic']) + self.resq.enqueue(CrashJob) + + assert worker.job() == {} + assert worker.get_failed() == 0 + + worker.fork_worker(worker.reserve()) + + assert worker.job() == {} + assert worker.get_failed() == 1 def test_retries_give_up_eventually(self): now = datetime.datetime.now() From f0b27b70c729c6c86e9d3f0471989096a24917d4 Mon Sep 17 00:00:00 2001 From: Cezar Sa Espinola Date: Sat, 5 May 2012 17:45:28 -0300 Subject: [PATCH 012/102] Adding tests for calling sys.exit() from job's perform method. (Currently failing) --- tests/__init__.py | 9 +++++++++ tests/test_worker.py | 26 +++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/tests/__init__.py b/tests/__init__.py index 5f5a3f5..cc80a4c 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -56,6 +56,15 @@ def perform(): ctypes.string_at(1) return "Never got here" +class PrematureExitJob(object): + queue = 'basic' + + @staticmethod + def perform(exit_code): + import sys + sys.exit(exit_code) + return "Never got here" + class TestProcess(object): queue = 'high' diff --git a/tests/test_worker.py b/tests/test_worker.py index 222545b..1779f9b 100644 --- a/tests/test_worker.py +++ b/tests/test_worker.py @@ -1,4 +1,4 @@ -from tests import PyResTests, Basic, TestProcess, ErrorObject, RetryOnExceptionJob, TimeoutJob, CrashJob +from tests import PyResTests, Basic, TestProcess, ErrorObject, RetryOnExceptionJob, TimeoutJob, CrashJob, PrematureExitJob from pyres import ResQ from pyres.job import Job from pyres.scheduler import Scheduler @@ -224,6 +224,30 @@ def test_detect_crashed_workers_as_failures(self): assert worker.job() == {} assert worker.get_failed() == 1 + def test_detect_non_0_sys_exit_as_failure(self): + worker = Worker(['basic']) + self.resq.enqueue(PrematureExitJob, 9) + + assert worker.job() == {} + assert worker.get_failed() == 0 + + worker.fork_worker(worker.reserve()) + + assert worker.job() == {} + assert worker.get_failed() == 1 + + def 
test_detect_code_0_sys_exit_as_success(self): + worker = Worker(['basic']) + self.resq.enqueue(PrematureExitJob, 0) + + assert worker.job() == {} + assert worker.get_failed() == 0 + + worker.fork_worker(worker.reserve()) + + assert worker.job() == {} + assert worker.get_failed() == 0 + def test_retries_give_up_eventually(self): now = datetime.datetime.now() self.set_current_time(now) From 614d06744e18f20909c777e51c98513332de2e8a Mon Sep 17 00:00:00 2001 From: Cezar Sa Espinola Date: Sat, 5 May 2012 19:23:34 -0300 Subject: [PATCH 013/102] Adding more tests for unexpected exits during the job execution. --- tests/__init__.py | 8 ++++++++ tests/test_worker.py | 26 +++++++++++++++++++++++++- 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/tests/__init__.py b/tests/__init__.py index cc80a4c..a75685a 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -65,6 +65,14 @@ def perform(exit_code): sys.exit(exit_code) return "Never got here" +class PrematureHardExitJob(object): + queue = 'basic' + + @staticmethod + def perform(exit_code): + os._exit(exit_code) + return "Never got here" + class TestProcess(object): queue = 'high' diff --git a/tests/test_worker.py b/tests/test_worker.py index 1779f9b..6e35367 100644 --- a/tests/test_worker.py +++ b/tests/test_worker.py @@ -1,4 +1,4 @@ -from tests import PyResTests, Basic, TestProcess, ErrorObject, RetryOnExceptionJob, TimeoutJob, CrashJob, PrematureExitJob +from tests import PyResTests, Basic, TestProcess, ErrorObject, RetryOnExceptionJob, TimeoutJob, CrashJob, PrematureExitJob, PrematureHardExitJob from pyres import ResQ from pyres.job import Job from pyres.scheduler import Scheduler @@ -248,6 +248,30 @@ def test_detect_code_0_sys_exit_as_success(self): assert worker.job() == {} assert worker.get_failed() == 0 + def test_detect_non_0_os_exit_as_failure(self): + worker = Worker(['basic']) + self.resq.enqueue(PrematureHardExitJob, 9) + + assert worker.job() == {} + assert worker.get_failed() == 0 + + 
worker.fork_worker(worker.reserve()) + + assert worker.job() == {} + assert worker.get_failed() == 1 + + def test_detect_code_0_os_exit_as_success(self): + worker = Worker(['basic']) + self.resq.enqueue(PrematureHardExitJob, 0) + + assert worker.job() == {} + assert worker.get_failed() == 0 + + worker.fork_worker(worker.reserve()) + + assert worker.job() == {} + assert worker.get_failed() == 0 + def test_retries_give_up_eventually(self): now = datetime.datetime.now() self.set_current_time(now) From 56d9c085679e2f543e37f63e9a8de698be30baca Mon Sep 17 00:00:00 2001 From: Cezar Sa Espinola Date: Sat, 5 May 2012 19:29:57 -0300 Subject: [PATCH 014/102] Better handling crashes and unexpected exits in the forked worker process. Fixes all the previously added tests. --- pyres/exceptions.py | 8 +++++- pyres/worker.py | 64 +++++++++++++++++++++++++++++---------------- 2 files changed, 49 insertions(+), 23 deletions(-) diff --git a/pyres/exceptions.py b/pyres/exceptions.py index 0ca1f2f..1ced01d 100644 --- a/pyres/exceptions.py +++ b/pyres/exceptions.py @@ -1,5 +1,11 @@ class NoQueueError(Exception): pass -class TimeoutError(RuntimeError): +class JobError(RuntimeError): + pass + +class TimeoutError(JobError): + pass + +class CrashError(JobError): pass \ No newline at end of file diff --git a/pyres/worker.py b/pyres/worker.py index 0e40143..8379319 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -6,7 +6,7 @@ import commands import random -from pyres.exceptions import NoQueueError, TimeoutError +from pyres.exceptions import NoQueueError, JobError, TimeoutError, CrashError from pyres.job import Job from pyres import ResQ, Stat, __version__ @@ -174,9 +174,17 @@ def fork_worker(self, job): # waits for the result or times out while True: - result = os.waitpid(self.child, os.WNOHANG) - if result != (0, 0): - break + pid, status = os.waitpid(self.child, os.WNOHANG) + if pid != 0: + if os.WIFEXITED(status) and os.WEXITSTATUS(status) == 0: + break + if os.WIFSTOPPED(status): + 
logger.warning("Process stopped by signal %d" % os.WSTOPSIG(status)) + else: + if os.WIFSIGNALED(status): + raise CrashError("Unexpected exit by signal %d" % os.WTERMSIG(status)) + raise CrashError("Unexpected exit status %d" % os.WEXITSTATUS(status)) + time.sleep(0.5) now = datetime.datetime.now() @@ -190,13 +198,13 @@ def fork_worker(self, job): if ose.errno != errno.EINTR: raise ose - - except TimeoutError as e: - exceptionType, exceptionValue, exceptionTraceback = sys.exc_info() - logger.exception("%s timed out: %s" % (job, e)) - job.fail(exceptionTraceback) - self.failed() - self.done_working() + except JobError: + self._handle_job_exception(job) + finally: + # If the child process' job called os._exit manually we need to + # finish the clean up here. + if self.job(): + self.done_working() logger.debug('done waiting') else: @@ -236,21 +244,33 @@ def before_process(self, job): def process(self, job=None): if not job: job = self.reserve() + + job_failed = False try: - self.working_on(job) - job = self.before_process(job) - return job.perform() - except Exception, e: - exceptionType, exceptionValue, exceptionTraceback = sys.exc_info() - logger.exception("%s failed: %s" % (job, e)) - job.fail(exceptionTraceback) - self.failed() - else: - logger.info('completed job') - logger.debug('job details: %s' % job) + try: + self.working_on(job) + job = self.before_process(job) + return job.perform() + except Exception: + job_failed = True + self._handle_job_exception(job) + except SystemExit, e: + if e.code != 0: + job_failed = True + self._handle_job_exception(job) + + if not job_failed: + logger.info('completed job') + logger.debug('job details: %s' % job) finally: self.done_working() + def _handle_job_exception(self, job): + exceptionType, exceptionValue, exceptionTraceback = sys.exc_info() + logger.exception("%s failed: %s" % (job, exceptionValue)) + job.fail(exceptionTraceback) + self.failed() + def reserve(self, timeout=10): logger.debug('checking queues %s' % 
self.queues) job = self.job_class.reserve(self.queues, self.resq, self.__str__(), timeout=timeout) From 67d68335c7ac84e6cbaa2ff879b92a6032657ed7 Mon Sep 17 00:00:00 2001 From: Matt George Date: Wed, 9 May 2012 09:03:00 -0500 Subject: [PATCH 015/102] version bump for release --- pyres/__init__.py | 2 +- requirements.txt | 2 +- setup.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index fbf5e09..95799c8 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -1,4 +1,4 @@ -__version__ = '1.1' +__version__ = '1.2' from redis import Redis import pyres.json_parser as json diff --git a/requirements.txt b/requirements.txt index 0ec06fd..6b9b97f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ simplejson==2.0.9 itty==0.6.2 -redis>=1.34.1 +redis==2.4.12 pystache==0.5.0 setproctitle>=1.0 diff --git a/setup.py b/setup.py index d5276d5..2ec107a 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ from setuptools import setup, find_packages -version='1.1' +version='1.2' setup( name='pyres', @@ -25,7 +25,7 @@ install_requires=[ 'simplejson>=2.0.9', 'itty>=0.6.2', - 'redis>=1.34.1', + 'redis==2.4.12', 'pystache>=0.1.0', 'setproctitle>=1.0' ], From 629143283bd6a38d35ae8886d6c5543cdf27f724 Mon Sep 17 00:00:00 2001 From: Matt George Date: Mon, 28 May 2012 21:03:25 -0500 Subject: [PATCH 016/102] adding travis.yml --- .travis.yml | 10 ++++++++++ requirements-test.txt | 1 + 2 files changed, 11 insertions(+) create mode 100644 .travis.yml create mode 100644 requirements-test.txt diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..f29bf2b --- /dev/null +++ b/.travis.yml @@ -0,0 +1,10 @@ +language: python +python: + - "2.6" + - "2.7" +# command to install dependencies +install: + - pip install -r requirements-test.txt --use-mirrors + - pip install -r requirements.txt --use-mirrors +# command to run tests +script: nosetests diff --git a/requirements-test.txt b/requirements-test.txt 
new file mode 100644 index 0000000..866dd41 --- /dev/null +++ b/requirements-test.txt @@ -0,0 +1 @@ +nose==1.1.2 From 2231739b03881decfc47208e54875a464e92c69e Mon Sep 17 00:00:00 2001 From: Matt George Date: Mon, 28 May 2012 21:12:21 -0500 Subject: [PATCH 017/102] adding travis status badge --- README.markdown | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.markdown b/README.markdown index 0f07681..adc52b2 100644 --- a/README.markdown +++ b/README.markdown @@ -8,6 +8,10 @@ Pyres - a Resque clone Because of some differences between ruby and python, there are a couple of places where I chose speed over correctness. The goal will be to eventually take the application and make it more pythonic without sacrificing the awesome functionality found in resque. At the same time, I hope to stay within the bounds of the original api and web interface. +## Travis CI + +Currently, pyres is being tested via travis ci for python version 2.6 and 2.7: +[![Build Status](https://secure.travis-ci.org/binarydud/pyres.png)](http://travis-ci.org/binarydud/pyres) ## Running Tests From 154d91c99a7a22e0446f1065466c24b89e3050c4 Mon Sep 17 00:00:00 2001 From: Matt George Date: Mon, 28 May 2012 21:50:38 -0500 Subject: [PATCH 018/102] adding pypy to environments tested --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index f29bf2b..d000dd9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,6 +2,7 @@ language: python python: - "2.6" - "2.7" + - "pypy" # command to install dependencies install: - pip install -r requirements-test.txt --use-mirrors From 01a0fe1ce21725f43b6596cee448f8a49f7b3b0b Mon Sep 17 00:00:00 2001 From: Matt George Date: Mon, 28 May 2012 22:02:06 -0500 Subject: [PATCH 019/102] tweaking readme --- README.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.markdown b/README.markdown index adc52b2..0af6efb 100644 --- a/README.markdown +++ b/README.markdown @@ -10,7 +10,7 @@ Because of some 
differences between ruby and python, there are a couple of place ## Travis CI -Currently, pyres is being tested via travis ci for python version 2.6 and 2.7: +Currently, pyres is being tested via travis ci for python version 2.6, 2.7, and pypy: [![Build Status](https://secure.travis-ci.org/binarydud/pyres.png)](http://travis-ci.org/binarydud/pyres) ## Running Tests From 31dcafdd38de7b4352eba1217b735ae58cc726ae Mon Sep 17 00:00:00 2001 From: Matt George Date: Fri, 1 Jun 2012 21:03:28 -0500 Subject: [PATCH 020/102] getting ready to remove resweb --- HISTORY.md | 6 ++++++ requirements.txt | 2 -- roadmap.md | 32 +++++++++++++------------------- setup.py | 6 +----- 4 files changed, 20 insertions(+), 26 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 40d369d..e8e8c85 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,3 +1,9 @@ +##1.3 (2012-06-01) +* remove resweb from pyres + +##1.2 +* release with changes from pull requests + ##1.1 (2011-06-16) * api change based on redis-py * setproctitle requirements fix diff --git a/requirements.txt b/requirements.txt index 6b9b97f..1f870e9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,3 @@ simplejson==2.0.9 -itty==0.6.2 redis==2.4.12 -pystache==0.5.0 setproctitle>=1.0 diff --git a/roadmap.md b/roadmap.md index ca810ce..9734065 100644 --- a/roadmap.md +++ b/roadmap.md @@ -1,21 +1,15 @@ pyres todo and roadmap -Version 0.6 -=========== -* better webtests -* resweb pagination - -Version 0.7 -=========== -* resweb controls for failed queues -* scheduled tasks - -Version 0.8 -=========== -* horde package integration -* web interface to horde - -Version 1.0 -=========== -* stabilize the api -* semantic versioning \ No newline at end of file +1.3 +=== +* resweb moved into own package + +2.0 +=== +* move from duck typed class to a decorated function for jobs +* add better hooks, like retools + +2.1 +=== +* add namespace support +* cleanup workers/extensions diff --git a/setup.py b/setup.py index 2ec107a..fdd19fe 100644 
--- a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ from setuptools import setup, find_packages -version='1.2' +version='1.3' setup( name='pyres', @@ -14,19 +14,15 @@ packages=find_packages(exclude=['ez_setup', 'examples', 'tests']), download_url='http://pypi.python.org/packages/source/p/pyres/pyres-%s.tar.gz' % version, include_package_data=True, - package_data={'resweb': ['templates/*.mustache','media/*']}, entry_points = """\ [console_scripts] pyres_manager=pyres.scripts:pyres_manager pyres_scheduler=pyres.scripts:pyres_scheduler - pyres_web=pyres.scripts:pyres_web pyres_worker=pyres.scripts:pyres_worker """, install_requires=[ 'simplejson>=2.0.9', - 'itty>=0.6.2', 'redis==2.4.12', - 'pystache>=0.1.0', 'setproctitle>=1.0' ], classifiers = [ From 0f9296620a010b79de291898f2911f6f9f5f68e2 Mon Sep 17 00:00:00 2001 From: Matt George Date: Fri, 1 Jun 2012 21:04:35 -0500 Subject: [PATCH 021/102] removing resweb --- resweb/__init__.py | 0 resweb/media/idle.png | Bin 661 -> 0 bytes resweb/media/jquery-1.3.2.min.js | 19 - resweb/media/jquery.relatize_date.js | 95 ---- resweb/media/poll.png | Bin 627 -> 0 bytes resweb/media/ranger.js | 24 - resweb/media/reset.css | 48 -- resweb/media/style.css | 83 ---- resweb/media/working.png | Bin 792 -> 0 bytes resweb/server.py | 126 ----- resweb/templates/delayed.mustache | 31 -- resweb/templates/delayed_timestamp.mustache | 30 -- resweb/templates/failed.mustache | 71 --- resweb/templates/footer.mustache | 9 - resweb/templates/header.mustache | 28 -- resweb/templates/overview.mustache | 5 - resweb/templates/queue.mustache | 29 -- resweb/templates/queue_full.mustache | 3 - resweb/templates/queues.mustache | 41 -- resweb/templates/stat.mustache | 13 - resweb/templates/stats.mustache | 37 -- resweb/templates/worker.mustache | 37 -- resweb/templates/workers.mustache | 38 -- resweb/templates/working.mustache | 33 -- resweb/templates/working_full.mustache | 3 - resweb/views.py | 517 -------------------- 26 files changed, 1320 deletions(-) delete 
mode 100644 resweb/__init__.py delete mode 100755 resweb/media/idle.png delete mode 100644 resweb/media/jquery-1.3.2.min.js delete mode 100644 resweb/media/jquery.relatize_date.js delete mode 100755 resweb/media/poll.png delete mode 100644 resweb/media/ranger.js delete mode 100644 resweb/media/reset.css delete mode 100644 resweb/media/style.css delete mode 100755 resweb/media/working.png delete mode 100644 resweb/server.py delete mode 100644 resweb/templates/delayed.mustache delete mode 100644 resweb/templates/delayed_timestamp.mustache delete mode 100644 resweb/templates/failed.mustache delete mode 100644 resweb/templates/footer.mustache delete mode 100644 resweb/templates/header.mustache delete mode 100644 resweb/templates/overview.mustache delete mode 100644 resweb/templates/queue.mustache delete mode 100644 resweb/templates/queue_full.mustache delete mode 100644 resweb/templates/queues.mustache delete mode 100644 resweb/templates/stat.mustache delete mode 100644 resweb/templates/stats.mustache delete mode 100644 resweb/templates/worker.mustache delete mode 100644 resweb/templates/workers.mustache delete mode 100644 resweb/templates/working.mustache delete mode 100644 resweb/templates/working_full.mustache delete mode 100644 resweb/views.py diff --git a/resweb/__init__.py b/resweb/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/resweb/media/idle.png b/resweb/media/idle.png deleted file mode 100755 index 50ffda61ab16b224efdcab6bda983379f53d365a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 661 zcmV;G0&4w2}`!D0)fBzZfM7V^)`_Pe_`T zUEJg)n~=sEVe`_bhp_(t?G5`e5|NHmvf_u}f9wv6dA3Qqox5cHDIl_7t zOazI89_+@ht+^TfBt~ke|VJNGV%(sFms7<0Hg6UJNq|KK44(r=0;J6VZg6nzt})[^>]*$|^#([\w-]+)$/,f=/^.[^:#\[\.,]*$/;o.fn=o.prototype={init:function(E,H){E=E||document;if(E.nodeType){this[0]=E;this.length=1;this.context=E;return this}if(typeof E==="string"){var G=D.exec(E);if(G&&(G[1]||!H)){if(G[1]){E=o.clean([G[1]],H)}else{var 
I=document.getElementById(G[3]);if(I&&I.id!=G[3]){return o().find(E)}var F=o(I||[]);F.context=document;F.selector=E;return F}}else{return o(H).find(E)}}else{if(o.isFunction(E)){return o(document).ready(E)}}if(E.selector&&E.context){this.selector=E.selector;this.context=E.context}return this.setArray(o.isArray(E)?E:o.makeArray(E))},selector:"",jquery:"1.3.2",size:function(){return this.length},get:function(E){return E===g?Array.prototype.slice.call(this):this[E]},pushStack:function(F,H,E){var G=o(F);G.prevObject=this;G.context=this.context;if(H==="find"){G.selector=this.selector+(this.selector?" ":"")+E}else{if(H){G.selector=this.selector+"."+H+"("+E+")"}}return G},setArray:function(E){this.length=0;Array.prototype.push.apply(this,E);return this},each:function(F,E){return o.each(this,F,E)},index:function(E){return o.inArray(E&&E.jquery?E[0]:E,this)},attr:function(F,H,G){var E=F;if(typeof F==="string"){if(H===g){return this[0]&&o[G||"attr"](this[0],F)}else{E={};E[F]=H}}return this.each(function(I){for(F in E){o.attr(G?this.style:this,F,o.prop(this,E[F],G,I,F))}})},css:function(E,F){if((E=="width"||E=="height")&&parseFloat(F)<0){F=g}return this.attr(E,F,"curCSS")},text:function(F){if(typeof F!=="object"&&F!=null){return this.empty().append((this[0]&&this[0].ownerDocument||document).createTextNode(F))}var E="";o.each(F||this,function(){o.each(this.childNodes,function(){if(this.nodeType!=8){E+=this.nodeType!=1?this.nodeValue:o.fn.text([this])}})});return E},wrapAll:function(E){if(this[0]){var F=o(E,this[0].ownerDocument).clone();if(this[0].parentNode){F.insertBefore(this[0])}F.map(function(){var G=this;while(G.firstChild){G=G.firstChild}return G}).append(this)}return this},wrapInner:function(E){return this.each(function(){o(this).contents().wrapAll(E)})},wrap:function(E){return this.each(function(){o(this).wrapAll(E)})},append:function(){return this.domManip(arguments,true,function(E){if(this.nodeType==1){this.appendChild(E)}})},prepend:function(){return 
this.domManip(arguments,true,function(E){if(this.nodeType==1){this.insertBefore(E,this.firstChild)}})},before:function(){return this.domManip(arguments,false,function(E){this.parentNode.insertBefore(E,this)})},after:function(){return this.domManip(arguments,false,function(E){this.parentNode.insertBefore(E,this.nextSibling)})},end:function(){return this.prevObject||o([])},push:[].push,sort:[].sort,splice:[].splice,find:function(E){if(this.length===1){var F=this.pushStack([],"find",E);F.length=0;o.find(E,this[0],F);return F}else{return this.pushStack(o.unique(o.map(this,function(G){return o.find(E,G)})),"find",E)}},clone:function(G){var E=this.map(function(){if(!o.support.noCloneEvent&&!o.isXMLDoc(this)){var I=this.outerHTML;if(!I){var J=this.ownerDocument.createElement("div");J.appendChild(this.cloneNode(true));I=J.innerHTML}return o.clean([I.replace(/ jQuery\d+="(?:\d+|null)"/g,"").replace(/^\s*/,"")])[0]}else{return this.cloneNode(true)}});if(G===true){var H=this.find("*").andSelf(),F=0;E.find("*").andSelf().each(function(){if(this.nodeName!==H[F].nodeName){return}var I=o.data(H[F],"events");for(var K in I){for(var J in I[K]){o.event.add(this,K,I[K][J],I[K][J].data)}}F++})}return E},filter:function(E){return this.pushStack(o.isFunction(E)&&o.grep(this,function(G,F){return E.call(G,F)})||o.multiFilter(E,o.grep(this,function(F){return F.nodeType===1})),"filter",E)},closest:function(E){var G=o.expr.match.POS.test(E)?o(E):null,F=0;return this.map(function(){var H=this;while(H&&H.ownerDocument){if(G?G.index(H)>-1:o(H).is(E)){o.data(H,"closest",F);return H}H=H.parentNode;F++}})},not:function(E){if(typeof E==="string"){if(f.test(E)){return this.pushStack(o.multiFilter(E,this,true),"not",E)}else{E=o.multiFilter(E,this)}}var F=E.length&&E[E.length-1]!==g&&!E.nodeType;return this.filter(function(){return F?o.inArray(this,E)<0:this!=E})},add:function(E){return this.pushStack(o.unique(o.merge(this.get(),typeof E==="string"?o(E):o.makeArray(E))))},is:function(E){return 
!!E&&o.multiFilter(E,this).length>0},hasClass:function(E){return !!E&&this.is("."+E)},val:function(K){if(K===g){var E=this[0];if(E){if(o.nodeName(E,"option")){return(E.attributes.value||{}).specified?E.value:E.text}if(o.nodeName(E,"select")){var I=E.selectedIndex,L=[],M=E.options,H=E.type=="select-one";if(I<0){return null}for(var F=H?I:0,J=H?I+1:M.length;F=0||o.inArray(this.name,K)>=0)}else{if(o.nodeName(this,"select")){var N=o.makeArray(K);o("option",this).each(function(){this.selected=(o.inArray(this.value,N)>=0||o.inArray(this.text,N)>=0)});if(!N.length){this.selectedIndex=-1}}else{this.value=K}}})},html:function(E){return E===g?(this[0]?this[0].innerHTML.replace(/ jQuery\d+="(?:\d+|null)"/g,""):null):this.empty().append(E)},replaceWith:function(E){return this.after(E).remove()},eq:function(E){return this.slice(E,+E+1)},slice:function(){return this.pushStack(Array.prototype.slice.apply(this,arguments),"slice",Array.prototype.slice.call(arguments).join(","))},map:function(E){return this.pushStack(o.map(this,function(G,F){return E.call(G,F,G)}))},andSelf:function(){return this.add(this.prevObject)},domManip:function(J,M,L){if(this[0]){var I=(this[0].ownerDocument||this[0]).createDocumentFragment(),F=o.clean(J,(this[0].ownerDocument||this[0]),I),H=I.firstChild;if(H){for(var G=0,E=this.length;G1||G>0?I.cloneNode(true):I)}}if(F){o.each(F,z)}}return this;function K(N,O){return M&&o.nodeName(N,"table")&&o.nodeName(O,"tr")?(N.getElementsByTagName("tbody")[0]||N.appendChild(N.ownerDocument.createElement("tbody"))):N}}};o.fn.init.prototype=o.fn;function z(E,F){if(F.src){o.ajax({url:F.src,async:false,dataType:"script"})}else{o.globalEval(F.text||F.textContent||F.innerHTML||"")}if(F.parentNode){F.parentNode.removeChild(F)}}function e(){return +new Date}o.extend=o.fn.extend=function(){var J=arguments[0]||{},H=1,I=arguments.length,E=false,G;if(typeof J==="boolean"){E=J;J=arguments[1]||{};H=2}if(typeof 
J!=="object"&&!o.isFunction(J)){J={}}if(I==H){J=this;--H}for(;H-1}},swap:function(H,G,I){var E={};for(var F in G){E[F]=H.style[F];H.style[F]=G[F]}I.call(H);for(var F in G){H.style[F]=E[F]}},css:function(H,F,J,E){if(F=="width"||F=="height"){var L,G={position:"absolute",visibility:"hidden",display:"block"},K=F=="width"?["Left","Right"]:["Top","Bottom"];function I(){L=F=="width"?H.offsetWidth:H.offsetHeight;if(E==="border"){return}o.each(K,function(){if(!E){L-=parseFloat(o.curCSS(H,"padding"+this,true))||0}if(E==="margin"){L+=parseFloat(o.curCSS(H,"margin"+this,true))||0}else{L-=parseFloat(o.curCSS(H,"border"+this+"Width",true))||0}})}if(H.offsetWidth!==0){I()}else{o.swap(H,G,I)}return Math.max(0,Math.round(L))}return o.curCSS(H,F,J)},curCSS:function(I,F,G){var L,E=I.style;if(F=="opacity"&&!o.support.opacity){L=o.attr(E,"opacity");return L==""?"1":L}if(F.match(/float/i)){F=w}if(!G&&E&&E[F]){L=E[F]}else{if(q.getComputedStyle){if(F.match(/float/i)){F="float"}F=F.replace(/([A-Z])/g,"-$1").toLowerCase();var M=q.getComputedStyle(I,null);if(M){L=M.getPropertyValue(F)}if(F=="opacity"&&L==""){L="1"}}else{if(I.currentStyle){var J=F.replace(/\-(\w)/g,function(N,O){return O.toUpperCase()});L=I.currentStyle[F]||I.currentStyle[J];if(!/^\d+(px)?$/i.test(L)&&/^\d/.test(L)){var H=E.left,K=I.runtimeStyle.left;I.runtimeStyle.left=I.currentStyle.left;E.left=L||0;L=E.pixelLeft+"px";E.left=H;I.runtimeStyle.left=K}}}}return L},clean:function(F,K,I){K=K||document;if(typeof K.createElement==="undefined"){K=K.ownerDocument||K[0]&&K[0].ownerDocument||document}if(!I&&F.length===1&&typeof F[0]==="string"){var H=/^<(\w+)\s*\/?>$/.exec(F[0]);if(H){return[K.createElement(H[1])]}}var G=[],E=[],L=K.createElement("div");o.each(F,function(P,S){if(typeof S==="number"){S+=""}if(!S){return}if(typeof S==="string"){S=S.replace(/(<(\w+)[^>]*?)\/>/g,function(U,V,T){return T.match(/^(abbr|br|col|img|input|link|meta|param|hr|area|embed)$/i)?U:V+">"});var O=S.replace(/^\s+/,"").substring(0,10).toLowerCase();var 
Q=!O.indexOf("",""]||!O.indexOf("",""]||O.match(/^<(thead|tbody|tfoot|colg|cap)/)&&[1,"","
"]||!O.indexOf("",""]||(!O.indexOf("",""]||!O.indexOf("",""]||!o.support.htmlSerialize&&[1,"div
","
"]||[0,"",""];L.innerHTML=Q[1]+S+Q[2];while(Q[0]--){L=L.lastChild}if(!o.support.tbody){var R=/"&&!R?L.childNodes:[];for(var M=N.length-1;M>=0;--M){if(o.nodeName(N[M],"tbody")&&!N[M].childNodes.length){N[M].parentNode.removeChild(N[M])}}}if(!o.support.leadingWhitespace&&/^\s/.test(S)){L.insertBefore(K.createTextNode(S.match(/^\s*/)[0]),L.firstChild)}S=o.makeArray(L.childNodes)}if(S.nodeType){G.push(S)}else{G=o.merge(G,S)}});if(I){for(var J=0;G[J];J++){if(o.nodeName(G[J],"script")&&(!G[J].type||G[J].type.toLowerCase()==="text/javascript")){E.push(G[J].parentNode?G[J].parentNode.removeChild(G[J]):G[J])}else{if(G[J].nodeType===1){G.splice.apply(G,[J+1,0].concat(o.makeArray(G[J].getElementsByTagName("script"))))}I.appendChild(G[J])}}return E}return G},attr:function(J,G,K){if(!J||J.nodeType==3||J.nodeType==8){return g}var H=!o.isXMLDoc(J),L=K!==g;G=H&&o.props[G]||G;if(J.tagName){var F=/href|src|style/.test(G);if(G=="selected"&&J.parentNode){J.parentNode.selectedIndex}if(G in J&&H&&!F){if(L){if(G=="type"&&o.nodeName(J,"input")&&J.parentNode){throw"type property can't be changed"}J[G]=K}if(o.nodeName(J,"form")&&J.getAttributeNode(G)){return J.getAttributeNode(G).nodeValue}if(G=="tabIndex"){var I=J.getAttributeNode("tabIndex");return I&&I.specified?I.value:J.nodeName.match(/(button|input|object|select|textarea)/i)?0:J.nodeName.match(/^(a|area)$/i)&&J.href?0:g}return J[G]}if(!o.support.style&&H&&G=="style"){return o.attr(J.style,"cssText",K)}if(L){J.setAttribute(G,""+K)}var E=!o.support.hrefNormalized&&H&&F?J.getAttribute(G,2):J.getAttribute(G);return E===null?g:E}if(!o.support.opacity&&G=="opacity"){if(L){J.zoom=1;J.filter=(J.filter||"").replace(/alpha\([^)]*\)/,"")+(parseInt(K)+""=="NaN"?"":"alpha(opacity="+K*100+")")}return J.filter&&J.filter.indexOf("opacity=")>=0?(parseFloat(J.filter.match(/opacity=([^)]*)/)[1])/100)+"":""}G=G.replace(/-([a-z])/ig,function(M,N){return N.toUpperCase()});if(L){J[G]=K}return 
J[G]},trim:function(E){return(E||"").replace(/^\s+|\s+$/g,"")},makeArray:function(G){var E=[];if(G!=null){var F=G.length;if(F==null||typeof G==="string"||o.isFunction(G)||G.setInterval){E[0]=G}else{while(F){E[--F]=G[F]}}}return E},inArray:function(G,H){for(var E=0,F=H.length;E0?this.clone(true):this).get();o.fn[F].apply(o(L[K]),I);J=J.concat(I)}return this.pushStack(J,E,G)}});o.each({removeAttr:function(E){o.attr(this,E,"");if(this.nodeType==1){this.removeAttribute(E)}},addClass:function(E){o.className.add(this,E)},removeClass:function(E){o.className.remove(this,E)},toggleClass:function(F,E){if(typeof E!=="boolean"){E=!o.className.has(this,F)}o.className[E?"add":"remove"](this,F)},remove:function(E){if(!E||o.filter(E,[this]).length){o("*",this).add([this]).each(function(){o.event.remove(this);o.removeData(this)});if(this.parentNode){this.parentNode.removeChild(this)}}},empty:function(){o(this).children().remove();while(this.firstChild){this.removeChild(this.firstChild)}}},function(E,F){o.fn[E]=function(){return this.each(F,arguments)}});function j(E,F){return E[0]&&parseInt(o.curCSS(E[0],F,true),10)||0}var h="jQuery"+e(),v=0,A={};o.extend({cache:{},data:function(F,E,G){F=F==l?A:F;var H=F[h];if(!H){H=F[h]=++v}if(E&&!o.cache[H]){o.cache[H]={}}if(G!==g){o.cache[H][E]=G}return E?o.cache[H][E]:H},removeData:function(F,E){F=F==l?A:F;var H=F[h];if(E){if(o.cache[H]){delete o.cache[H][E];E="";for(E in o.cache[H]){break}if(!E){o.removeData(F)}}}else{try{delete F[h]}catch(G){if(F.removeAttribute){F.removeAttribute(h)}}delete o.cache[H]}},queue:function(F,E,H){if(F){E=(E||"fx")+"queue";var G=o.data(F,E);if(!G||o.isArray(H)){G=o.data(F,E,o.makeArray(H))}else{if(H){G.push(H)}}}return G},dequeue:function(H,G){var E=o.queue(H,G),F=E.shift();if(!G||G==="fx"){F=E[0]}if(F!==g){F.call(H)}}});o.fn.extend({data:function(E,G){var H=E.split(".");H[1]=H[1]?"."+H[1]:"";if(G===g){var F=this.triggerHandler("getData"+H[1]+"!",[H[0]]);if(F===g&&this.length){F=o.data(this[0],E)}return 
F===g&&H[1]?this.data(H[0]):F}else{return this.trigger("setData"+H[1]+"!",[H[0],G]).each(function(){o.data(this,E,G)})}},removeData:function(E){return this.each(function(){o.removeData(this,E)})},queue:function(E,F){if(typeof E!=="string"){F=E;E="fx"}if(F===g){return o.queue(this[0],E)}return this.each(function(){var G=o.queue(this,E,F);if(E=="fx"&&G.length==1){G[0].call(this)}})},dequeue:function(E){return this.each(function(){o.dequeue(this,E)})}}); -/* - * Sizzle CSS Selector Engine - v0.9.3 - * Copyright 2009, The Dojo Foundation - * Released under the MIT, BSD, and GPL Licenses. - * More information: http://sizzlejs.com/ - */ -(function(){var R=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^[\]]*\]|['"][^'"]*['"]|[^[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?/g,L=0,H=Object.prototype.toString;var F=function(Y,U,ab,ac){ab=ab||[];U=U||document;if(U.nodeType!==1&&U.nodeType!==9){return[]}if(!Y||typeof Y!=="string"){return ab}var Z=[],W,af,ai,T,ad,V,X=true;R.lastIndex=0;while((W=R.exec(Y))!==null){Z.push(W[1]);if(W[2]){V=RegExp.rightContext;break}}if(Z.length>1&&M.exec(Y)){if(Z.length===2&&I.relative[Z[0]]){af=J(Z[0]+Z[1],U)}else{af=I.relative[Z[0]]?[U]:F(Z.shift(),U);while(Z.length){Y=Z.shift();if(I.relative[Y]){Y+=Z.shift()}af=J(Y,af)}}}else{var ae=ac?{expr:Z.pop(),set:E(ac)}:F.find(Z.pop(),Z.length===1&&U.parentNode?U.parentNode:U,Q(U));af=F.filter(ae.expr,ae.set);if(Z.length>0){ai=E(af)}else{X=false}while(Z.length){var ah=Z.pop(),ag=ah;if(!I.relative[ah]){ah=""}else{ag=Z.pop()}if(ag==null){ag=U}I.relative[ah](ai,ag,Q(U))}}if(!ai){ai=af}if(!ai){throw"Syntax error, unrecognized expression: "+(ah||Y)}if(H.call(ai)==="[object Array]"){if(!X){ab.push.apply(ab,ai)}else{if(U.nodeType===1){for(var aa=0;ai[aa]!=null;aa++){if(ai[aa]&&(ai[aa]===true||ai[aa].nodeType===1&&K(U,ai[aa]))){ab.push(af[aa])}}}else{for(var 
aa=0;ai[aa]!=null;aa++){if(ai[aa]&&ai[aa].nodeType===1){ab.push(af[aa])}}}}}else{E(ai,ab)}if(V){F(V,U,ab,ac);if(G){hasDuplicate=false;ab.sort(G);if(hasDuplicate){for(var aa=1;aa":function(Z,U,aa){var X=typeof U==="string";if(X&&!/\W/.test(U)){U=aa?U:U.toUpperCase();for(var V=0,T=Z.length;V=0)){if(!V){T.push(Y)}}else{if(V){U[X]=false}}}}return false},ID:function(T){return T[1].replace(/\\/g,"")},TAG:function(U,T){for(var V=0;T[V]===false;V++){}return T[V]&&Q(T[V])?U[1]:U[1].toUpperCase()},CHILD:function(T){if(T[1]=="nth"){var U=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(T[2]=="even"&&"2n"||T[2]=="odd"&&"2n+1"||!/\D/.test(T[2])&&"0n+"+T[2]||T[2]);T[2]=(U[1]+(U[2]||1))-0;T[3]=U[3]-0}T[0]=L++;return T},ATTR:function(X,U,V,T,Y,Z){var W=X[1].replace(/\\/g,"");if(!Z&&I.attrMap[W]){X[1]=I.attrMap[W]}if(X[2]==="~="){X[4]=" "+X[4]+" "}return X},PSEUDO:function(X,U,V,T,Y){if(X[1]==="not"){if(X[3].match(R).length>1||/^\w/.test(X[3])){X[3]=F(X[3],null,null,U)}else{var W=F.filter(X[3],U,V,true^Y);if(!V){T.push.apply(T,W)}return false}}else{if(I.match.POS.test(X[0])||I.match.CHILD.test(X[0])){return true}}return X},POS:function(T){T.unshift(true);return T}},filters:{enabled:function(T){return T.disabled===false&&T.type!=="hidden"},disabled:function(T){return T.disabled===true},checked:function(T){return T.checked===true},selected:function(T){T.parentNode.selectedIndex;return T.selected===true},parent:function(T){return !!T.firstChild},empty:function(T){return !T.firstChild},has:function(V,U,T){return 
!!F(T[3],V).length},header:function(T){return/h\d/i.test(T.nodeName)},text:function(T){return"text"===T.type},radio:function(T){return"radio"===T.type},checkbox:function(T){return"checkbox"===T.type},file:function(T){return"file"===T.type},password:function(T){return"password"===T.type},submit:function(T){return"submit"===T.type},image:function(T){return"image"===T.type},reset:function(T){return"reset"===T.type},button:function(T){return"button"===T.type||T.nodeName.toUpperCase()==="BUTTON"},input:function(T){return/input|select|textarea|button/i.test(T.nodeName)}},setFilters:{first:function(U,T){return T===0},last:function(V,U,T,W){return U===W.length-1},even:function(U,T){return T%2===0},odd:function(U,T){return T%2===1},lt:function(V,U,T){return UT[3]-0},nth:function(V,U,T){return T[3]-0==U},eq:function(V,U,T){return T[3]-0==U}},filter:{PSEUDO:function(Z,V,W,aa){var U=V[1],X=I.filters[U];if(X){return X(Z,W,V,aa)}else{if(U==="contains"){return(Z.textContent||Z.innerText||"").indexOf(V[3])>=0}else{if(U==="not"){var Y=V[3];for(var W=0,T=Y.length;W=0)}}},ID:function(U,T){return U.nodeType===1&&U.getAttribute("id")===T},TAG:function(U,T){return(T==="*"&&U.nodeType===1)||U.nodeName===T},CLASS:function(U,T){return(" "+(U.className||U.getAttribute("class"))+" ").indexOf(T)>-1},ATTR:function(Y,W){var V=W[1],T=I.attrHandle[V]?I.attrHandle[V](Y):Y[V]!=null?Y[V]:Y.getAttribute(V),Z=T+"",X=W[2],U=W[4];return T==null?X==="!=":X==="="?Z===U:X==="*="?Z.indexOf(U)>=0:X==="~="?(" "+Z+" ").indexOf(U)>=0:!U?Z&&T!==false:X==="!="?Z!=U:X==="^="?Z.indexOf(U)===0:X==="$="?Z.substr(Z.length-U.length)===U:X==="|="?Z===U||Z.substr(0,U.length+1)===U+"-":false},POS:function(X,U,V,Y){var T=U[2],W=I.setFilters[T];if(W){return W(X,V,U,Y)}}}};var M=I.match.POS;for(var O in I.match){I.match[O]=RegExp(I.match[O].source+/(?![^\[]*\])(?![^\(]*\))/.source)}var E=function(U,T){U=Array.prototype.slice.call(U);if(T){T.push.apply(T,U);return T}return 
U};try{Array.prototype.slice.call(document.documentElement.childNodes)}catch(N){E=function(X,W){var U=W||[];if(H.call(X)==="[object Array]"){Array.prototype.push.apply(U,X)}else{if(typeof X.length==="number"){for(var V=0,T=X.length;V";var T=document.documentElement;T.insertBefore(U,T.firstChild);if(!!document.getElementById(V)){I.find.ID=function(X,Y,Z){if(typeof Y.getElementById!=="undefined"&&!Z){var W=Y.getElementById(X[1]);return W?W.id===X[1]||typeof W.getAttributeNode!=="undefined"&&W.getAttributeNode("id").nodeValue===X[1]?[W]:g:[]}};I.filter.ID=function(Y,W){var X=typeof Y.getAttributeNode!=="undefined"&&Y.getAttributeNode("id");return Y.nodeType===1&&X&&X.nodeValue===W}}T.removeChild(U)})();(function(){var T=document.createElement("div");T.appendChild(document.createComment(""));if(T.getElementsByTagName("*").length>0){I.find.TAG=function(U,Y){var X=Y.getElementsByTagName(U[1]);if(U[1]==="*"){var W=[];for(var V=0;X[V];V++){if(X[V].nodeType===1){W.push(X[V])}}X=W}return X}}T.innerHTML="";if(T.firstChild&&typeof T.firstChild.getAttribute!=="undefined"&&T.firstChild.getAttribute("href")!=="#"){I.attrHandle.href=function(U){return U.getAttribute("href",2)}}})();if(document.querySelectorAll){(function(){var T=F,U=document.createElement("div");U.innerHTML="

";if(U.querySelectorAll&&U.querySelectorAll(".TEST").length===0){return}F=function(Y,X,V,W){X=X||document;if(!W&&X.nodeType===9&&!Q(X)){try{return E(X.querySelectorAll(Y),V)}catch(Z){}}return T(Y,X,V,W)};F.find=T.find;F.filter=T.filter;F.selectors=T.selectors;F.matches=T.matches})()}if(document.getElementsByClassName&&document.documentElement.getElementsByClassName){(function(){var T=document.createElement("div");T.innerHTML="
";if(T.getElementsByClassName("e").length===0){return}T.lastChild.className="e";if(T.getElementsByClassName("e").length===1){return}I.order.splice(1,0,"CLASS");I.find.CLASS=function(U,V,W){if(typeof V.getElementsByClassName!=="undefined"&&!W){return V.getElementsByClassName(U[1])}}})()}function P(U,Z,Y,ad,aa,ac){var ab=U=="previousSibling"&&!ac;for(var W=0,V=ad.length;W0){X=T;break}}}T=T[U]}ad[W]=X}}}var K=document.compareDocumentPosition?function(U,T){return U.compareDocumentPosition(T)&16}:function(U,T){return U!==T&&(U.contains?U.contains(T):true)};var Q=function(T){return T.nodeType===9&&T.documentElement.nodeName!=="HTML"||!!T.ownerDocument&&Q(T.ownerDocument)};var J=function(T,aa){var W=[],X="",Y,V=aa.nodeType?[aa]:aa;while((Y=I.match.PSEUDO.exec(T))){X+=Y[0];T=T.replace(I.match.PSEUDO,"")}T=I.relative[T]?T+"*":T;for(var Z=0,U=V.length;Z0||T.offsetHeight>0};F.selectors.filters.animated=function(T){return o.grep(o.timers,function(U){return T===U.elem}).length};o.multiFilter=function(V,T,U){if(U){V=":not("+V+")"}return F.matches(V,T)};o.dir=function(V,U){var T=[],W=V[U];while(W&&W!=document){if(W.nodeType==1){T.push(W)}W=W[U]}return T};o.nth=function(X,T,V,W){T=T||1;var U=0;for(;X;X=X[V]){if(X.nodeType==1&&++U==T){break}}return X};o.sibling=function(V,U){var T=[];for(;V;V=V.nextSibling){if(V.nodeType==1&&V!=U){T.push(V)}}return T};return;l.Sizzle=F})();o.event={add:function(I,F,H,K){if(I.nodeType==3||I.nodeType==8){return}if(I.setInterval&&I!=l){I=l}if(!H.guid){H.guid=this.guid++}if(K!==g){var G=H;H=this.proxy(G);H.data=K}var E=o.data(I,"events")||o.data(I,"events",{}),J=o.data(I,"handle")||o.data(I,"handle",function(){return typeof o!=="undefined"&&!o.event.triggered?o.event.handle.apply(arguments.callee.elem,arguments):g});J.elem=I;o.each(F.split(/\s+/),function(M,N){var O=N.split(".");N=O.shift();H.type=O.slice().sort().join(".");var 
L=E[N];if(o.event.specialAll[N]){o.event.specialAll[N].setup.call(I,K,O)}if(!L){L=E[N]={};if(!o.event.special[N]||o.event.special[N].setup.call(I,K,O)===false){if(I.addEventListener){I.addEventListener(N,J,false)}else{if(I.attachEvent){I.attachEvent("on"+N,J)}}}}L[H.guid]=H;o.event.global[N]=true});I=null},guid:1,global:{},remove:function(K,H,J){if(K.nodeType==3||K.nodeType==8){return}var G=o.data(K,"events"),F,E;if(G){if(H===g||(typeof H==="string"&&H.charAt(0)==".")){for(var I in G){this.remove(K,I+(H||""))}}else{if(H.type){J=H.handler;H=H.type}o.each(H.split(/\s+/),function(M,O){var Q=O.split(".");O=Q.shift();var N=RegExp("(^|\\.)"+Q.slice().sort().join(".*\\.")+"(\\.|$)");if(G[O]){if(J){delete G[O][J.guid]}else{for(var P in G[O]){if(N.test(G[O][P].type)){delete G[O][P]}}}if(o.event.specialAll[O]){o.event.specialAll[O].teardown.call(K,Q)}for(F in G[O]){break}if(!F){if(!o.event.special[O]||o.event.special[O].teardown.call(K,Q)===false){if(K.removeEventListener){K.removeEventListener(O,o.data(K,"handle"),false)}else{if(K.detachEvent){K.detachEvent("on"+O,o.data(K,"handle"))}}}F=null;delete G[O]}}})}for(F in G){break}if(!F){var L=o.data(K,"handle");if(L){L.elem=null}o.removeData(K,"events");o.removeData(K,"handle")}}},trigger:function(I,K,H,E){var G=I.type||I;if(!E){I=typeof I==="object"?I[h]?I:o.extend(o.Event(G),I):o.Event(G);if(G.indexOf("!")>=0){I.type=G=G.slice(0,-1);I.exclusive=true}if(!H){I.stopPropagation();if(this.global[G]){o.each(o.cache,function(){if(this.events&&this.events[G]){o.event.trigger(I,K,this.handle.elem)}})}}if(!H||H.nodeType==3||H.nodeType==8){return g}I.result=g;I.target=H;K=o.makeArray(K);K.unshift(I)}I.currentTarget=H;var J=o.data(H,"handle");if(J){J.apply(H,K)}if((!H[G]||(o.nodeName(H,"a")&&G=="click"))&&H["on"+G]&&H["on"+G].apply(H,K)===false){I.result=false}if(!E&&H[G]&&!I.isDefaultPrevented()&&!(o.nodeName(H,"a")&&G=="click")){this.triggered=true;try{H[G]()}catch(L){}}this.triggered=false;if(!I.isPropagationStopped()){var 
F=H.parentNode||H.ownerDocument;if(F){o.event.trigger(I,K,F,true)}}},handle:function(K){var J,E;K=arguments[0]=o.event.fix(K||l.event);K.currentTarget=this;var L=K.type.split(".");K.type=L.shift();J=!L.length&&!K.exclusive;var I=RegExp("(^|\\.)"+L.slice().sort().join(".*\\.")+"(\\.|$)");E=(o.data(this,"events")||{})[K.type];for(var G in E){var H=E[G];if(J||I.test(H.type)){K.handler=H;K.data=H.data;var F=H.apply(this,arguments);if(F!==g){K.result=F;if(F===false){K.preventDefault();K.stopPropagation()}}if(K.isImmediatePropagationStopped()){break}}}},props:"altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode metaKey newValue originalTarget pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "),fix:function(H){if(H[h]){return H}var F=H;H=o.Event(F);for(var G=this.props.length,J;G;){J=this.props[--G];H[J]=F[J]}if(!H.target){H.target=H.srcElement||document}if(H.target.nodeType==3){H.target=H.target.parentNode}if(!H.relatedTarget&&H.fromElement){H.relatedTarget=H.fromElement==H.target?H.toElement:H.fromElement}if(H.pageX==null&&H.clientX!=null){var I=document.documentElement,E=document.body;H.pageX=H.clientX+(I&&I.scrollLeft||E&&E.scrollLeft||0)-(I.clientLeft||0);H.pageY=H.clientY+(I&&I.scrollTop||E&&E.scrollTop||0)-(I.clientTop||0)}if(!H.which&&((H.charCode||H.charCode===0)?H.charCode:H.keyCode)){H.which=H.charCode||H.keyCode}if(!H.metaKey&&H.ctrlKey){H.metaKey=H.ctrlKey}if(!H.which&&H.button){H.which=(H.button&1?1:(H.button&2?3:(H.button&4?2:0)))}return H},proxy:function(F,E){E=E||function(){return F.apply(this,arguments)};E.guid=F.guid=F.guid||E.guid||this.guid++;return E},special:{ready:{setup:B,teardown:function(){}}},specialAll:{live:{setup:function(E,F){o.event.add(this,F[0],c)},teardown:function(G){if(G.length){var 
E=0,F=RegExp("(^|\\.)"+G[0]+"(\\.|$)");o.each((o.data(this,"events").live||{}),function(){if(F.test(this.type)){E++}});if(E<1){o.event.remove(this,G[0],c)}}}}}};o.Event=function(E){if(!this.preventDefault){return new o.Event(E)}if(E&&E.type){this.originalEvent=E;this.type=E.type}else{this.type=E}this.timeStamp=e();this[h]=true};function k(){return false}function u(){return true}o.Event.prototype={preventDefault:function(){this.isDefaultPrevented=u;var E=this.originalEvent;if(!E){return}if(E.preventDefault){E.preventDefault()}E.returnValue=false},stopPropagation:function(){this.isPropagationStopped=u;var E=this.originalEvent;if(!E){return}if(E.stopPropagation){E.stopPropagation()}E.cancelBubble=true},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=u;this.stopPropagation()},isDefaultPrevented:k,isPropagationStopped:k,isImmediatePropagationStopped:k};var a=function(F){var E=F.relatedTarget;while(E&&E!=this){try{E=E.parentNode}catch(G){E=this}}if(E!=this){F.type=F.data;o.event.handle.apply(this,arguments)}};o.each({mouseover:"mouseenter",mouseout:"mouseleave"},function(F,E){o.event.special[E]={setup:function(){o.event.add(this,F,a,E)},teardown:function(){o.event.remove(this,F,a)}}});o.fn.extend({bind:function(F,G,E){return F=="unload"?this.one(F,G,E):this.each(function(){o.event.add(this,F,E||G,E&&G)})},one:function(G,H,F){var E=o.event.proxy(F||H,function(I){o(this).unbind(I,E);return(F||H).apply(this,arguments)});return this.each(function(){o.event.add(this,G,E,F&&H)})},unbind:function(F,E){return this.each(function(){o.event.remove(this,F,E)})},trigger:function(E,F){return this.each(function(){o.event.trigger(E,F,this)})},triggerHandler:function(E,G){if(this[0]){var F=o.Event(E);F.preventDefault();F.stopPropagation();o.event.trigger(F,G,this[0]);return F.result}},toggle:function(G){var E=arguments,F=1;while(F=0){var E=G.slice(I,G.length);G=G.slice(0,I)}var H="GET";if(J){if(o.isFunction(J)){K=J;J=null}else{if(typeof 
J==="object"){J=o.param(J);H="POST"}}}var F=this;o.ajax({url:G,type:H,dataType:"html",data:J,complete:function(M,L){if(L=="success"||L=="notmodified"){F.html(E?o("
").append(M.responseText.replace(//g,"")).find(E):M.responseText)}if(K){F.each(K,[M.responseText,L,M])}}});return this},serialize:function(){return o.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?o.makeArray(this.elements):this}).filter(function(){return this.name&&!this.disabled&&(this.checked||/select|textarea/i.test(this.nodeName)||/text|hidden|password|search/i.test(this.type))}).map(function(E,F){var G=o(this).val();return G==null?null:o.isArray(G)?o.map(G,function(I,H){return{name:F.name,value:I}}):{name:F.name,value:G}}).get()}});o.each("ajaxStart,ajaxStop,ajaxComplete,ajaxError,ajaxSuccess,ajaxSend".split(","),function(E,F){o.fn[F]=function(G){return this.bind(F,G)}});var r=e();o.extend({get:function(E,G,H,F){if(o.isFunction(G)){H=G;G=null}return o.ajax({type:"GET",url:E,data:G,success:H,dataType:F})},getScript:function(E,F){return o.get(E,null,F,"script")},getJSON:function(E,F,G){return o.get(E,F,G,"json")},post:function(E,G,H,F){if(o.isFunction(G)){H=G;G={}}return o.ajax({type:"POST",url:E,data:G,success:H,dataType:F})},ajaxSetup:function(E){o.extend(o.ajaxSettings,E)},ajaxSettings:{url:location.href,global:true,type:"GET",contentType:"application/x-www-form-urlencoded",processData:true,async:true,xhr:function(){return l.ActiveXObject?new ActiveXObject("Microsoft.XMLHTTP"):new XMLHttpRequest()},accepts:{xml:"application/xml, text/xml",html:"text/html",script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},lastModified:{},ajax:function(M){M=o.extend(true,M,o.extend(true,{},o.ajaxSettings,M));var W,F=/=\?(&|$)/g,R,V,G=M.type.toUpperCase();if(M.data&&M.processData&&typeof 
M.data!=="string"){M.data=o.param(M.data)}if(M.dataType=="jsonp"){if(G=="GET"){if(!M.url.match(F)){M.url+=(M.url.match(/\?/)?"&":"?")+(M.jsonp||"callback")+"=?"}}else{if(!M.data||!M.data.match(F)){M.data=(M.data?M.data+"&":"")+(M.jsonp||"callback")+"=?"}}M.dataType="json"}if(M.dataType=="json"&&(M.data&&M.data.match(F)||M.url.match(F))){W="jsonp"+r++;if(M.data){M.data=(M.data+"").replace(F,"="+W+"$1")}M.url=M.url.replace(F,"="+W+"$1");M.dataType="script";l[W]=function(X){V=X;I();L();l[W]=g;try{delete l[W]}catch(Y){}if(H){H.removeChild(T)}}}if(M.dataType=="script"&&M.cache==null){M.cache=false}if(M.cache===false&&G=="GET"){var E=e();var U=M.url.replace(/(\?|&)_=.*?(&|$)/,"$1_="+E+"$2");M.url=U+((U==M.url)?(M.url.match(/\?/)?"&":"?")+"_="+E:"")}if(M.data&&G=="GET"){M.url+=(M.url.match(/\?/)?"&":"?")+M.data;M.data=null}if(M.global&&!o.active++){o.event.trigger("ajaxStart")}var Q=/^(\w+:)?\/\/([^\/?#]+)/.exec(M.url);if(M.dataType=="script"&&G=="GET"&&Q&&(Q[1]&&Q[1]!=location.protocol||Q[2]!=location.host)){var H=document.getElementsByTagName("head")[0];var T=document.createElement("script");T.src=M.url;if(M.scriptCharset){T.charset=M.scriptCharset}if(!W){var O=false;T.onload=T.onreadystatechange=function(){if(!O&&(!this.readyState||this.readyState=="loaded"||this.readyState=="complete")){O=true;I();L();T.onload=T.onreadystatechange=null;H.removeChild(T)}}}H.appendChild(T);return g}var K=false;var J=M.xhr();if(M.username){J.open(G,M.url,M.async,M.username,M.password)}else{J.open(G,M.url,M.async)}try{if(M.data){J.setRequestHeader("Content-Type",M.contentType)}if(M.ifModified){J.setRequestHeader("If-Modified-Since",o.lastModified[M.url]||"Thu, 01 Jan 1970 00:00:00 GMT")}J.setRequestHeader("X-Requested-With","XMLHttpRequest");J.setRequestHeader("Accept",M.dataType&&M.accepts[M.dataType]?M.accepts[M.dataType]+", */*":M.accepts._default)}catch(S){}if(M.beforeSend&&M.beforeSend(J,M)===false){if(M.global&&!--o.active){o.event.trigger("ajaxStop")}J.abort();return 
false}if(M.global){o.event.trigger("ajaxSend",[J,M])}var N=function(X){if(J.readyState==0){if(P){clearInterval(P);P=null;if(M.global&&!--o.active){o.event.trigger("ajaxStop")}}}else{if(!K&&J&&(J.readyState==4||X=="timeout")){K=true;if(P){clearInterval(P);P=null}R=X=="timeout"?"timeout":!o.httpSuccess(J)?"error":M.ifModified&&o.httpNotModified(J,M.url)?"notmodified":"success";if(R=="success"){try{V=o.httpData(J,M.dataType,M)}catch(Z){R="parsererror"}}if(R=="success"){var Y;try{Y=J.getResponseHeader("Last-Modified")}catch(Z){}if(M.ifModified&&Y){o.lastModified[M.url]=Y}if(!W){I()}}else{o.handleError(M,J,R)}L();if(X){J.abort()}if(M.async){J=null}}}};if(M.async){var P=setInterval(N,13);if(M.timeout>0){setTimeout(function(){if(J&&!K){N("timeout")}},M.timeout)}}try{J.send(M.data)}catch(S){o.handleError(M,J,null,S)}if(!M.async){N()}function I(){if(M.success){M.success(V,R)}if(M.global){o.event.trigger("ajaxSuccess",[J,M])}}function L(){if(M.complete){M.complete(J,R)}if(M.global){o.event.trigger("ajaxComplete",[J,M])}if(M.global&&!--o.active){o.event.trigger("ajaxStop")}}return J},handleError:function(F,H,E,G){if(F.error){F.error(H,E,G)}if(F.global){o.event.trigger("ajaxError",[H,F,G])}},active:0,httpSuccess:function(F){try{return !F.status&&location.protocol=="file:"||(F.status>=200&&F.status<300)||F.status==304||F.status==1223}catch(E){}return false},httpNotModified:function(G,E){try{var H=G.getResponseHeader("Last-Modified");return G.status==304||H==o.lastModified[E]}catch(F){}return false},httpData:function(J,H,G){var F=J.getResponseHeader("content-type"),E=H=="xml"||!H&&F&&F.indexOf("xml")>=0,I=E?J.responseXML:J.responseText;if(E&&I.documentElement.tagName=="parsererror"){throw"parsererror"}if(G&&G.dataFilter){I=G.dataFilter(I,H)}if(typeof I==="string"){if(H=="script"){o.globalEval(I)}if(H=="json"){I=l["eval"]("("+I+")")}}return I},param:function(E){var G=[];function 
H(I,J){G[G.length]=encodeURIComponent(I)+"="+encodeURIComponent(J)}if(o.isArray(E)||E.jquery){o.each(E,function(){H(this.name,this.value)})}else{for(var F in E){if(o.isArray(E[F])){o.each(E[F],function(){H(F,this)})}else{H(F,o.isFunction(E[F])?E[F]():E[F])}}}return G.join("&").replace(/%20/g,"+")}});var m={},n,d=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]];function t(F,E){var G={};o.each(d.concat.apply([],d.slice(0,E)),function(){G[this]=F});return G}o.fn.extend({show:function(J,L){if(J){return this.animate(t("show",3),J,L)}else{for(var H=0,F=this.length;H").appendTo("body");K=I.css("display");if(K==="none"){K="block"}I.remove();m[G]=K}o.data(this[H],"olddisplay",K)}}for(var H=0,F=this.length;H=0;H--){if(G[H].elem==this){if(E){G[H](true)}G.splice(H,1)}}});if(!E){this.dequeue()}return this}});o.each({slideDown:t("show",1),slideUp:t("hide",1),slideToggle:t("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"}},function(E,F){o.fn[E]=function(G,H){return this.animate(F,G,H)}});o.extend({speed:function(G,H,F){var E=typeof G==="object"?G:{complete:F||!F&&H||o.isFunction(G)&&G,duration:G,easing:F&&H||H&&!o.isFunction(H)&&H};E.duration=o.fx.off?0:typeof E.duration==="number"?E.duration:o.fx.speeds[E.duration]||o.fx.speeds._default;E.old=E.complete;E.complete=function(){if(E.queue!==false){o(this).dequeue()}if(o.isFunction(E.old)){E.old.call(this)}};return E},easing:{linear:function(G,H,E,F){return 
E+F*G},swing:function(G,H,E,F){return((-Math.cos(G*Math.PI)/2)+0.5)*F+E}},timers:[],fx:function(F,E,G){this.options=E;this.elem=F;this.prop=G;if(!E.orig){E.orig={}}}});o.fx.prototype={update:function(){if(this.options.step){this.options.step.call(this.elem,this.now,this)}(o.fx.step[this.prop]||o.fx.step._default)(this);if((this.prop=="height"||this.prop=="width")&&this.elem.style){this.elem.style.display="block"}},cur:function(F){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null)){return this.elem[this.prop]}var E=parseFloat(o.css(this.elem,this.prop,F));return E&&E>-10000?E:parseFloat(o.curCSS(this.elem,this.prop))||0},custom:function(I,H,G){this.startTime=e();this.start=I;this.end=H;this.unit=G||this.unit||"px";this.now=this.start;this.pos=this.state=0;var E=this;function F(J){return E.step(J)}F.elem=this.elem;if(F()&&o.timers.push(F)&&!n){n=setInterval(function(){var K=o.timers;for(var J=0;J=this.options.duration+this.startTime){this.now=this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;var E=true;for(var F in this.options.curAnim){if(this.options.curAnim[F]!==true){E=false}}if(E){if(this.options.display!=null){this.elem.style.overflow=this.options.overflow;this.elem.style.display=this.options.display;if(o.css(this.elem,"display")=="none"){this.elem.style.display="block"}}if(this.options.hide){o(this.elem).hide()}if(this.options.hide||this.options.show){for(var I in this.options.curAnim){o.attr(this.elem.style,I,this.options.orig[I])}}this.options.complete.call(this.elem)}return false}else{var J=G-this.startTime;this.state=J/this.options.duration;this.pos=o.easing[this.options.easing||(o.easing.swing?"swing":"linear")](this.state,J,0,1,this.options.duration);this.now=this.start+((this.end-this.start)*this.pos);this.update()}return 
true}};o.extend(o.fx,{speeds:{slow:600,fast:200,_default:400},step:{opacity:function(E){o.attr(E.elem.style,"opacity",E.now)},_default:function(E){if(E.elem.style&&E.elem.style[E.prop]!=null){E.elem.style[E.prop]=E.now+E.unit}else{E.elem[E.prop]=E.now}}}});if(document.documentElement.getBoundingClientRect){o.fn.offset=function(){if(!this[0]){return{top:0,left:0}}if(this[0]===this[0].ownerDocument.body){return o.offset.bodyOffset(this[0])}var G=this[0].getBoundingClientRect(),J=this[0].ownerDocument,F=J.body,E=J.documentElement,L=E.clientTop||F.clientTop||0,K=E.clientLeft||F.clientLeft||0,I=G.top+(self.pageYOffset||o.boxModel&&E.scrollTop||F.scrollTop)-L,H=G.left+(self.pageXOffset||o.boxModel&&E.scrollLeft||F.scrollLeft)-K;return{top:I,left:H}}}else{o.fn.offset=function(){if(!this[0]){return{top:0,left:0}}if(this[0]===this[0].ownerDocument.body){return o.offset.bodyOffset(this[0])}o.offset.initialized||o.offset.initialize();var J=this[0],G=J.offsetParent,F=J,O=J.ownerDocument,M,H=O.documentElement,K=O.body,L=O.defaultView,E=L.getComputedStyle(J,null),N=J.offsetTop,I=J.offsetLeft;while((J=J.parentNode)&&J!==K&&J!==H){M=L.getComputedStyle(J,null);N-=J.scrollTop,I-=J.scrollLeft;if(J===G){N+=J.offsetTop,I+=J.offsetLeft;if(o.offset.doesNotAddBorder&&!(o.offset.doesAddBorderForTableAndCells&&/^t(able|d|h)$/i.test(J.tagName))){N+=parseInt(M.borderTopWidth,10)||0,I+=parseInt(M.borderLeftWidth,10)||0}F=G,G=J.offsetParent}if(o.offset.subtractsBorderForOverflowNotVisible&&M.overflow!=="visible"){N+=parseInt(M.borderTopWidth,10)||0,I+=parseInt(M.borderLeftWidth,10)||0}E=M}if(E.position==="relative"||E.position==="static"){N+=K.offsetTop,I+=K.offsetLeft}if(E.position==="fixed"){N+=Math.max(H.scrollTop,K.scrollTop),I+=Math.max(H.scrollLeft,K.scrollLeft)}return{top:N,left:I}}}o.offset={initialize:function(){if(this.initialized){return}var L=document.body,F=document.createElement("div"),H,G,N,I,M,E,J=L.style.marginTop,K='
';M={position:"absolute",top:0,left:0,margin:0,border:0,width:"1px",height:"1px",visibility:"hidden"};for(E in M){F.style[E]=M[E]}F.innerHTML=K;L.insertBefore(F,L.firstChild);H=F.firstChild,G=H.firstChild,I=H.nextSibling.firstChild.firstChild;this.doesNotAddBorder=(G.offsetTop!==5);this.doesAddBorderForTableAndCells=(I.offsetTop===5);H.style.overflow="hidden",H.style.position="relative";this.subtractsBorderForOverflowNotVisible=(G.offsetTop===-5);L.style.marginTop="1px";this.doesNotIncludeMarginInBodyOffset=(L.offsetTop===0);L.style.marginTop=J;L.removeChild(F);this.initialized=true},bodyOffset:function(E){o.offset.initialized||o.offset.initialize();var G=E.offsetTop,F=E.offsetLeft;if(o.offset.doesNotIncludeMarginInBodyOffset){G+=parseInt(o.curCSS(E,"marginTop",true),10)||0,F+=parseInt(o.curCSS(E,"marginLeft",true),10)||0}return{top:G,left:F}}};o.fn.extend({position:function(){var I=0,H=0,F;if(this[0]){var G=this.offsetParent(),J=this.offset(),E=/^body|html$/i.test(G[0].tagName)?{top:0,left:0}:G.offset();J.top-=j(this,"marginTop");J.left-=j(this,"marginLeft");E.top+=j(G,"borderTopWidth");E.left+=j(G,"borderLeftWidth");F={top:J.top-E.top,left:J.left-E.left}}return F},offsetParent:function(){var E=this[0].offsetParent||document.body;while(E&&(!/^body|html$/i.test(E.tagName)&&o.css(E,"position")=="static")){E=E.offsetParent}return o(E)}});o.each(["Left","Top"],function(F,E){var G="scroll"+E;o.fn[G]=function(H){if(!this[0]){return null}return H!==g?this.each(function(){this==l||this==document?l.scrollTo(!F?H:o(l).scrollLeft(),F?H:o(l).scrollTop()):this[G]=H}):this[0]==l||this[0]==document?self[F?"pageYOffset":"pageXOffset"]||o.boxModel&&document.documentElement[G]||document.body[G]:this[0][G]}});o.each(["Height","Width"],function(I,G){var E=I?"Left":"Top",H=I?"Right":"Bottom",F=G.toLowerCase();o.fn["inner"+G]=function(){return this[0]?o.css(this[0],F,false,"padding"):null};o.fn["outer"+G]=function(K){return this[0]?o.css(this[0],F,false,K?"margin":"border"):null};var 
J=G.toLowerCase();o.fn[J]=function(K){return this[0]==l?document.compatMode=="CSS1Compat"&&document.documentElement["client"+G]||document.body["client"+G]:this[0]==document?Math.max(document.documentElement["client"+G],document.body["scroll"+G],document.documentElement["scroll"+G],document.body["offset"+G],document.documentElement["offset"+G]):K===g?(this.length?o.css(this[0],J):null):this.css(J,typeof K==="string"?K:K+"px")}})})(); \ No newline at end of file diff --git a/resweb/media/jquery.relatize_date.js b/resweb/media/jquery.relatize_date.js deleted file mode 100644 index fc62b89..0000000 --- a/resweb/media/jquery.relatize_date.js +++ /dev/null @@ -1,95 +0,0 @@ -// All credit goes to Rick Olson. -(function($) { - $.fn.relatizeDate = function() { - return $(this).each(function() { - if ($(this).hasClass( 'relatized' )) return - $(this).text( $.relatizeDate(this) ).addClass( 'relatized' ) - }) - } - - $.relatizeDate = function(element) { - return $.relatizeDate.timeAgoInWords( new Date($(element).text()) ) - } - - // shortcut - $r = $.relatizeDate - - $.extend($.relatizeDate, { - shortDays: [ 'Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat' ], - days: ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'], - shortMonths: [ 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec' ], - months: [ 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December' ], - - /** - * Given a formatted string, replace the necessary items and return. 
- * Example: Time.now().strftime("%B %d, %Y") => February 11, 2008 - * @param {String} format The formatted string used to format the results - */ - strftime: function(date, format) { - var day = date.getDay(), month = date.getMonth(); - var hours = date.getHours(), minutes = date.getMinutes(); - - var pad = function(num) { - var string = num.toString(10); - return new Array((2 - string.length) + 1).join('0') + string - }; - - return format.replace(/\%([aAbBcdHImMpSwyY])/g, function(part) { - switch(part[1]) { - case 'a': return $r.shortDays[day]; break; - case 'A': return $r.days[day]; break; - case 'b': return $r.shortMonths[month]; break; - case 'B': return $r.months[month]; break; - case 'c': return date.toString(); break; - case 'd': return pad(date.getDate()); break; - case 'H': return pad(hours); break; - case 'I': return pad((hours + 12) % 12); break; - case 'm': return pad(month + 1); break; - case 'M': return pad(minutes); break; - case 'p': return hours > 12 ? 'PM' : 'AM'; break; - case 'S': return pad(date.getSeconds()); break; - case 'w': return day; break; - case 'y': return pad(date.getFullYear() % 100); break; - case 'Y': return date.getFullYear().toString(); break; - } - }) - }, - - timeAgoInWords: function(targetDate, includeTime) { - return $r.distanceOfTimeInWords(targetDate, new Date(), includeTime); - }, - - /** - * Return the distance of time in words between two Date's - * Example: '5 days ago', 'about an hour ago' - * @param {Date} fromTime The start date to use in the calculation - * @param {Date} toTime The end date to use in the calculation - * @param {Boolean} Include the time in the output - */ - distanceOfTimeInWords: function(fromTime, toTime, includeTime) { - var delta = parseInt((toTime.getTime() - fromTime.getTime()) / 1000); - if (delta < 60) { - return 'just now'; - } else if (delta < 120) { - return 'about a minute ago'; - } else if (delta < (45*60)) { - return (parseInt(delta / 60)).toString() + ' minutes ago'; - } else if 
(delta < (120*60)) { - return 'about an hour ago'; - } else if (delta < (24*60*60)) { - return 'about ' + (parseInt(delta / 3600)).toString() + ' hours ago'; - } else if (delta < (48*60*60)) { - return '1 day ago'; - } else { - var days = (parseInt(delta / 86400)).toString(); - if (days > 5) { - var fmt = '%B %d, %Y' - if (includeTime) fmt += ' %I:%M %p' - return $r.strftime(fromTime, fmt); - } else { - return days + " days ago" - } - } - } - }) -})(jQuery); diff --git a/resweb/media/poll.png b/resweb/media/poll.png deleted file mode 100755 index ca632c4efefc0e6b10d9c9018e6f76ce85cdcdd5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 627 zcmV-(0*w8MP)<{07iUtHr4?g(L&Ku_Q-kUeG#Pd8jiZO6RL7t~DC$=bar0a2!N}!Edh$g*C!3TU;kJiqP=jU?v%hrLn^-y(MjX8}@27NJOI$ z>+Xi!%F1igG*v=~M+4mOcXjc%CMK>UG;G^83kVse2{;FBa}$6NIMv$=J)sb-(tEbL zy4GwqRqC+kvRS2V+wFp)Y_)9L=#R(a2#CA;`ygu?1Vj;{kq8_Q1aO(ahmDQ$+~VRB zJb%f-1hfJJ_13hk^%s)#W?^aRWyJ3vzmm<~jK^Yt4FsRh2bSXio*5RFm*2Gvqez2w zKBTtix+Tjr=N`|`D!Q0)myFl(|o?r(Dg5Ry$)1=+(M;X233|_ zj2iwZ%hlVnvyZUu-Be%S=*;BgWmVIvhOV#E=KW;I)u|~^i$zdSzu8^X9?$a+aOZ*D zZug)+#tXtl{N6$7zn(H3I$&xKQD0F9KYTjRxvrb*+(`cW-}qgC0RTgm+Q_h_>uvx5 N002ovPDHLkV1hkfBwYXi diff --git a/resweb/media/ranger.js b/resweb/media/ranger.js deleted file mode 100644 index 60a8fdf..0000000 --- a/resweb/media/ranger.js +++ /dev/null @@ -1,24 +0,0 @@ -var poll_interval = 2; - -$(function() { - - $('.time').relatizeDate() - $('.backtrace').click(function() { - $(this).next().toggle() - return false - }) - - $('a[rel=poll]').click(function() { - var href = $(this).attr('href') - $(this).parent().text('Starting...') - $("#main").addClass('polling') - setInterval(function() { - $.ajax({dataType:'text', type:'get', url:href, success:function(data) { - $('#main').html(data) - $('#main .time').relatizeDate() - }}) - }, poll_interval * 1000) - return false - }) - -}) \ No newline at end of file diff --git a/resweb/media/reset.css b/resweb/media/reset.css deleted file mode 
100644 index ed2e756..0000000 --- a/resweb/media/reset.css +++ /dev/null @@ -1,48 +0,0 @@ -html, body, div, span, applet, object, iframe, -h1, h2, h3, h4, h5, h6, p, blockquote, pre, -a, abbr, acronym, address, big, cite, code, -del, dfn, em, font, img, ins, kbd, q, s, samp, -small, strike, strong, sub, sup, tt, var, -dl, dt, dd, ul, li, -form, label, legend, -table, caption, tbody, tfoot, thead, tr, th, td { - margin: 0; - padding: 0; - border: 0; - outline: 0; - font-weight: inherit; - font-style: normal; - font-size: 100%; - font-family: inherit; -} - -:focus { - outline: 0; -} - -body { - line-height: 1; -} - -ul { - list-style: none; -} - -table { - border-collapse: collapse; - border-spacing: 0; -} - -caption, th, td { - text-align: left; - font-weight: normal; -} - -blockquote:before, blockquote:after, -q:before, q:after { - content: ""; -} - -blockquote, q { - quotes: "" ""; -} \ No newline at end of file diff --git a/resweb/media/style.css b/resweb/media/style.css deleted file mode 100644 index bd5b921..0000000 --- a/resweb/media/style.css +++ /dev/null @@ -1,83 +0,0 @@ -html { background:#efefef; font-family:Arial, Verdana, sans-serif; font-size:13px; } -body { padding:0; margin:0; } - -.header { background:#000; padding:8px 5% 0 5%; border-bottom:1px solid #444;border-bottom:5px solid #ce1212;} -.header h1 { color:#333; font-size:90%; font-weight:bold; margin-bottom:6px;} -.header ul li { display:inline;} -.header ul li a { color:#fff; text-decoration:none; margin-right:10px; display:inline-block; padding:8px; -webkit-border-top-right-radius:6px; -webkit-border-top-left-radius:6px; } -.header ul li a:hover { background:#333;} -.header ul li.current a { background:#ce1212; font-weight:bold; color:#fff;} - -.subnav { padding:2px 5% 7px 5%; background:#ce1212; font-size:90%;} -.subnav li { display:inline;} -.subnav li a { color:#fff; text-decoration:none; margin-right:10px; display:inline-block; background:#dd5b5b; padding:5px; -webkit-border-radius:3px; 
-moz-border-radius:3px;} -.subnav li.current a { background:#fff; font-weight:bold; color:#ce1212;} -.subnav li a:active { background:#b00909;} - -#main { padding:10px 5%; background:#fff; overflow:hidden; } -#main .logo { float:right; margin:10px;} -#main span.hl { background:#efefef; padding:2px;} -#main h1 { margin:10px 0; font-size:190%; font-weight:bold; color:#ce1212;} -#main h2 { margin:10px 0; font-size:130%;} -#main table { width:100%; margin:10px 0;} -#main table tr td, #main table tr th { border:1px solid #ccc; padding:6px;} -#main table tr th { background:#efefef; color:#888; font-size:80%; font-weight:bold;} -#main table tr td.no-data { text-align:center; padding:40px 0; color:#999; font-style:italic; font-size:130%;} -#main a { color:#111;} -#main p { margin:5px 0;} -#main p.intro { margin-bottom:15px; font-size:85%; color:#999; margin-top:0; line-height:1.3;} -#main h1.wi { margin-bottom:5px;} -#main p.sub { font-size:95%; color:#999;} - -#main table.queues { width:40%;} -#main table.queues td.queue { font-weight:bold; width:50%;} -#main table.queues tr.failed td { background:#ffecec; border-top:2px solid #d37474; font-size:90%; color:#d37474;} -#main table.queues tr.failed td a{ color:#d37474;} - -#main table.jobs td.class { font-family:Monaco, "Courier New", monospace; font-size:90%; width:50%;} -#main table.jobs td.args{ width:50%;} - -#main table.workers td.icon {width:1%; background:#efefef;text-align:center;} -#main table.workers td.where { width:25%;} -#main table.workers td.queues { width:35%;} -#main .queue-tag { background:#b1d2e9; padding:2px; margin:0 3px; font-size:80%; text-decoration:none; text-transform:uppercase; font-weight:bold; color:#3274a2; -webkit-border-radius:4px; -moz-border-radius:4px;} -#main table.workers td.queues.queue { width:10%;} -#main table.workers td.process { width:35%;} -#main table.workers td.process span.waiting { color:#999; font-size:90%;} -#main table.workers td.process small { font-size:80%; 
margin-left:5px;} -#main table.workers td.process code { font-family:Monaco, "Courier New", monospace; font-size:90%;} -#main table.workers td.process small a { color:#999;} -#main.polling table.workers tr.working td { background:#f4ffe4; color:#7ac312;} -#main.polling table.workers tr.working td.where a { color:#7ac312;} -#main.polling table.workers tr.working td.process code { font-weight:bold;} - - -#main table.stats th { font-size:100%; width:40%; color:#000;} -#main hr { border:0; border-top:5px solid #efefef; margin:15px 0;} - -#footer { padding:10px 5%; background:#efefef; color:#999; font-size:85%; line-height:1.5; border-top:5px solid #ccc; padding-top:10px;} -#footer p a { color:#999;} - -#main p.poll { background:url(poll.png) no-repeat 0 2px; padding:3px 0; padding-left:23px; float:right; font-size:85%; } - -#main ul.failed {} -#main ul.failed li {background:-webkit-gradient(linear, left top, left bottom, from(#efefef), to(#fff)) #efefef; margin-top:10px; padding:10px; overflow:hidden; -webkit-border-radius:5px; border:1px solid #ccc; } -#main ul.failed li dl dt {font-size:80%; color:#999; width:60px; float:left; padding-top:1px; text-align:right;} -#main ul.failed li dl dd {margin-bottom:10px; margin-left:70px;} -#main ul.failed li dl dd code, #main ul.failed li dl dd pre { font-family:Monaco, "Courier New", monospace; font-size:90%;} -#main ul.failed li dl dd.error a {font-family:Monaco, "Courier New", monospace; font-size:90%; } -#main ul.failed li dl dd.error pre { margin-top:3px; line-height:1.3;} - -#main p.pagination { background:#efefef; padding:10px; overflow:hidden;} -#main p.pagination a.less { float:left;} -#main p.pagination a.more { float:right;} - -#main form.clear-failed {float:right; margin-top:-10px;} - -ul.pagination{ - list-style-type: none; -} -ul.pagination li{ - text-decoration: none; - display:inline; -} \ No newline at end of file diff --git a/resweb/media/working.png b/resweb/media/working.png deleted file mode 100755 index 
06f1ee390a69aede315c33ab6ba8c085bcdd003e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 792 zcmV+z1LypSP)EC;hyLIe&>0f-+2gQ40f`JC2;i49PokwJkJAXv$+T%L5!V5v?59o z-w?z2?=wni%x1Io;&<@lt?E90n_wf)v1<)A_QSQ+_GV`(--u_#Sjh$FqRJJ@ z*sy6Boqq{jfy$Uq9{rZaI@uQCQm&Ff^^Q!Rstd zE@y#qguEY%O=BNEaXkMDV-Sw0cf*m@wyS6A<(~^_D65dbP}MB~O;yWwIe1Z1b@nBi zT0-@=C@^6t3gF(0XuENt32)AWAaYi@7N;l!Rn_ehmoN;v5}8kCOlnS0(?l`{f*=Gf zrye|=3jJdrA)$gmK9{@w;@z~o-DAp(5R&`QwvC#RB%coMFBj0N*B_=M1p!@Eag;@~0?~niK+Z!TD?Vdw{eS*KyP26sWCBTxu!rJWIOloF!%2c%h zoSU!>=O*2MeKP~J%&K{aa9lrCl$!YHWH*dXw&PioRRUrRF=#avZ%+L;jXPPt1sDL; W*<1`LTn4ZJ0000\w.*)/') -def queue(request, queue_id): - start = int(request.GET.get('start',0)) - return Queue(HOST, queue_id, start).render().encode('utf-8') - -@get('/failed/') -def failed(request): - start = request.GET.get('start',0) - start = int(start) - return Failed(HOST, start).render().encode('utf-8') - -@post('/failed/retry/') -def failed_retry(request): - failed_job = request.POST['failed_job'] - job = b64decode(failed_job) - decoded = ResQ.decode(job) - failure.retry(HOST, decoded['queue'], job) - raise Redirect('/failed/') - -@post('/failed/delete/') -def failed_delete(request): - failed_job = request.POST['failed_job'] - job = b64decode(failed_job) - failure.delete(HOST, job) - raise Redirect('/failed/') - -@get('/failed/delete_all/') -def delete_all_failed(request): - #move resque:failed to resque:failed-staging - HOST.redis.rename('resque:failed','resque:failed-staging') - HOST.redis.delete('resque:failed-staging') - raise Redirect('/failed/') - - -@get('/failed/retry_all') -def retry_failed(request, number=5000): - failures = failure.all(HOST, 0, number) - for f in failures: - j = b64decode(f['redis_value']) - failure.retry(HOST, f['queue'], j) - raise Redirect('/failed/') - -@get('/workers/(?P\w.+)/') -def worker(request, worker_id): - return Worker(HOST, worker_id).render().encode('utf-8') - -@get('/workers/') -def workers(request): - 
return Workers(HOST).render().encode('utf-8') - -@get('/stats/') -def stats(request): - raise Redirect('/stats/resque/') - -@get('/stats/(?P\w+)/') -def stats(request, key): - return Stats(HOST, key).render().encode('utf-8') - -@get('/stat/(?P\w.+)') -def stat(request, stat_id): - return Stat(HOST, stat_id).render().encode('utf-8') - -@get('/delayed/') -def delayed(request): - start = request.GET.get('start',0) - start = int(start) - return Delayed(HOST, start).render().encode('utf-8') - -@get('/delayed/(?P\w.+)') -def delayed_timestamp(request, timestamp): - start = request.GET.get('start',0) - start = int(start) - return DelayedTimestamp(HOST, timestamp, start).render().encode('utf-8') - -@get('/media/(?P.+)') -def my_media(request, filename): - #return serve_static_file(request, filename) - #my_media.content_type = content_type(filename) - - return serve_static_file(request, filename, root=MY_ROOT) - #output = static_file(filename, root=MY_ROOT) - #return Response(output, content_type=content_type(filename)) - #return static_file(request, filename=filename, root=my_root) - - -# The hook to make it run in a mod_wsgi environment. -def application(environ, start_response): - return handle_request(environ, start_response) - -if __name__ == "__main__": - run_itty() diff --git a/resweb/templates/delayed.mustache b/resweb/templates/delayed.mustache deleted file mode 100644 index 4f67225..0000000 --- a/resweb/templates/delayed.mustache +++ /dev/null @@ -1,31 +0,0 @@ -{{>header}} -

Delayed Jobs

- -

- This list below contains the timestamps for scheduled delayed jobs. -

- -

- Showing {{start}} to {{end}} of {{size}} timestamps -

- - - - - - - - {{#jobs}} - - - - - {{/jobs}} -
TimestampJob count
{{formated_time}}{{size}}
- -{{#pagination}} -
  • - {{#current}}{{/current}}{{link_name}}{{#current}}{{/current}} -
  • -{{/pagination}} -{{>footer}} \ No newline at end of file diff --git a/resweb/templates/delayed_timestamp.mustache b/resweb/templates/delayed_timestamp.mustache deleted file mode 100644 index 2e7e09e..0000000 --- a/resweb/templates/delayed_timestamp.mustache +++ /dev/null @@ -1,30 +0,0 @@ -{{>header}} - -

    Delayed jobs scheduled for {{formated_timestamp}}

    - -

    Showing {{start}} to {{end}} of {{size}} jobs

    - - - - - - - {{#jobs}} - - - - - {{/jobs}} - {{#no_jobs}} - - - - {{/no_jobs}} -
    ClassArgs
    {{class}}{{args}}
    There are no pending jobs scheduled for this time.
    - -{{#pagination}} -
  • - {{#current}}{{/current}}{{link_name}}{{#current}}{{/current}} -
  • -{{/pagination}} -{{>footer}} diff --git a/resweb/templates/failed.mustache b/resweb/templates/failed.mustache deleted file mode 100644 index 8c5cb7a..0000000 --- a/resweb/templates/failed.mustache +++ /dev/null @@ -1,71 +0,0 @@ -{{>header}} - -

    Failed Jobs

    -
    -

    Showing {{start}} to {{end}} of {{size}} jobs

    - -
      - {{#failed_jobs}} -
    • -
      -
      Worker
      -
      - {{worker}} on {{queue}} at - {{failed_at}} -
      -
      Class
      -
      {{payload_class}}
      -
      Arguments
      -
      {{payload_args}}
      -
      Exception
      -
      {{exception}}
      -
      Error
      -
      - {{error}} -
      {{traceback}}
      -
      -
      - Payload Actions -
      -
      -
      -

      -

      -
      -
      -

      -

      -
      -
      -
      -
      -
      -
    • - {{/failed_jobs}} -
    - -{{>footer}} diff --git a/resweb/templates/footer.mustache b/resweb/templates/footer.mustache deleted file mode 100644 index 8d1cc11..0000000 --- a/resweb/templates/footer.mustache +++ /dev/null @@ -1,9 +0,0 @@ -
    - - - - - \ No newline at end of file diff --git a/resweb/templates/header.mustache b/resweb/templates/header.mustache deleted file mode 100644 index ac0f7da..0000000 --- a/resweb/templates/header.mustache +++ /dev/null @@ -1,28 +0,0 @@ - - - - pyres. - - - - - - - - - -
    diff --git a/resweb/templates/overview.mustache b/resweb/templates/overview.mustache deleted file mode 100644 index 7a21d2f..0000000 --- a/resweb/templates/overview.mustache +++ /dev/null @@ -1,5 +0,0 @@ -{{>header}} -{{>queues}} -
    -{{>working}} -{{>footer}} diff --git a/resweb/templates/queue.mustache b/resweb/templates/queue.mustache deleted file mode 100644 index 2c1bcd0..0000000 --- a/resweb/templates/queue.mustache +++ /dev/null @@ -1,29 +0,0 @@ -{{>header}} - -

    Pending jobs on {{queue}}

    -

    Showing {{start}} to {{end}} of {{size}} jobs

    - - - - - - - {{#jobs}} - - - - - {{/jobs}} - {{#empty_job}} - - - - {{/empty_job}} -
    ClassArgs
    {{class}}{{args}}
    There are no pending jobs in this queue
    -{{>footer}} \ No newline at end of file diff --git a/resweb/templates/queue_full.mustache b/resweb/templates/queue_full.mustache deleted file mode 100644 index 5c940e8..0000000 --- a/resweb/templates/queue_full.mustache +++ /dev/null @@ -1,3 +0,0 @@ -{{>header}} -{{>queues}} -{{>footer}} diff --git a/resweb/templates/queues.mustache b/resweb/templates/queues.mustache deleted file mode 100644 index 3dcb95c..0000000 --- a/resweb/templates/queues.mustache +++ /dev/null @@ -1,41 +0,0 @@ -{{#queue}} -

    Pending jobs on {{queue}}

    -

    Showing {{start}} to {{end}} of {{size}} jobs

    - - - - - - {{#jobs}} - - - - - {{/jobs}} - {{#empty_job}} - - - - {{/empty_job}} -
    ClassArgs
    {{class}}{{args}}
    There are no pending jobs in this queue
    -{{/queue}} -{{#empty}} -

    Queues

    -

    The list below contains all the registered queues with the number of jobs currently in the queue. Select a queue from above to view all jobs currently pending on the queue.

    - - - - - - {{#queues}} - - - - - {{/queues}} - - - - -
    NameJobs
    {{queue}}{{size}}
    failed{{fail_count}}
    -{{/empty}} diff --git a/resweb/templates/stat.mustache b/resweb/templates/stat.mustache deleted file mode 100644 index aa87a18..0000000 --- a/resweb/templates/stat.mustache +++ /dev/null @@ -1,13 +0,0 @@ -{{>header}} -

    Key "{{key}}" is a {{key_type}}

    -

    size: {{size}}

    - - {{#items}} - - - - {{/items}} -
    - {{row}} -
    -{{>footer}} \ No newline at end of file diff --git a/resweb/templates/stats.mustache b/resweb/templates/stats.mustache deleted file mode 100644 index 04b644b..0000000 --- a/resweb/templates/stats.mustache +++ /dev/null @@ -1,37 +0,0 @@ -{{>header}} -{{#standard}} -

    {{title}}

    - - {{#stats}} - - - - - {{/stats}} -
    - {{key}} - - {{value}} -
    -{{/standard}} -{{#resque_keys}} -

    {{title}}

    -

    (All keys are actually prefixed with "resque:")

    - - - - - - - {{#stats}} - - - - - - {{/stats}} -
    keytypesize
    - {{key}} - {{type}}{{size}}
    -{{/resque_keys}} -{{>footer}} \ No newline at end of file diff --git a/resweb/templates/worker.mustache b/resweb/templates/worker.mustache deleted file mode 100644 index a54e893..0000000 --- a/resweb/templates/worker.mustache +++ /dev/null @@ -1,37 +0,0 @@ -{{>header}} -

    Worker {{worker}}

    - - - - - - - - - - - - - - - - - - - - - -
     HostPidStartedQueuesProcessedFailedProcessing
    {{state}}{{host}}{{pid}}{{started_at}} - {{#queues}} - {{q}}  - {{/queues}} - {{processed}}{{failed}} - {{#data}} - {{code}} - {{runat}} - {{/data}} - {{#nodata}} - Waiting for a job... - {{/nodata}} -
    -{{>footer}} \ No newline at end of file diff --git a/resweb/templates/workers.mustache b/resweb/templates/workers.mustache deleted file mode 100644 index dc03206..0000000 --- a/resweb/templates/workers.mustache +++ /dev/null @@ -1,38 +0,0 @@ -{{>header}} -

    {{size}} Workers

    -

    The workers listed below are all registered as active on your system.

    - - - - - - - - {{#workers}} - - - - - - - - {{/workers}} - {{#empty}} - - - - {{/empty}} -
     WhereQueuesProcessing
    {{state}}{{host}}:{{pid}} - {{#queues}} - {{q}}  - {{/queues}} - - {{#data}} - {{code}} - {{runat}} - {{/data}} - {{#nodata}} - Waiting for a job... - {{/nodata}} -
    There are no registered workers
    -{{>footer}} \ No newline at end of file diff --git a/resweb/templates/working.mustache b/resweb/templates/working.mustache deleted file mode 100644 index c1a3a58..0000000 --- a/resweb/templates/working.mustache +++ /dev/null @@ -1,33 +0,0 @@ -

    {{worker_size}} of {{total_workers}} Workers Working

    -

    The list below contains all workers which are currently running a job.

    - - - - - - - - {{#empty_workers}} - - - - {{/empty_workers}} - {{#workers}} - - - - - - - {{/workers}} -
     WhereQueueProcessing
    Nothing is happening right now...
    {{state}}{{host}}:{{pid}} - {{queue}} - - {{#data}} - {{code}} - {{runat}} - {{/data}} - {{#nodata}} - Waiting for a job... - {{/nodata}} -
    diff --git a/resweb/templates/working_full.mustache b/resweb/templates/working_full.mustache deleted file mode 100644 index 7e83b3e..0000000 --- a/resweb/templates/working_full.mustache +++ /dev/null @@ -1,3 +0,0 @@ -{{>header}} -{{>working}} -{{>footer}} \ No newline at end of file diff --git a/resweb/views.py b/resweb/views.py deleted file mode 100644 index c3407fa..0000000 --- a/resweb/views.py +++ /dev/null @@ -1,517 +0,0 @@ -import pystache - -from pyres import __version__ -from pyres.worker import Worker as Wrkr -from pyres import failure -import os -import datetime - -TEMPLATE_PATH = os.path.join(os.path.dirname(__file__), 'templates') -class ResWeb(pystache.TemplateSpec): - template_path = TEMPLATE_PATH - renderer = pystache.Renderer(search_dirs = template_path) - - def __init__(self, host): - self.resq = host - - def render(self): - return self.renderer.render(self) - - def media_folder(self): - return '/media/' - - def close(self): - self.resq.close() - - def address(self): - return '%s:%s' % (self.resq.host,self.resq.port) - - def version(self): - return str(__version__) - - def pages(self, start, size, link_function, width=20): - pages = [] - - num_pages = size / width - if size % width > 0: - num_pages += 1 - - if size < width: - return pages - - for i in range(num_pages): - current = True - if start == i*width: - current = False - link = link_function(i*width) - link_name = str(i+1) - pages.append(dict(link=link,link_name=link_name,current=current)) - return pages - -class Overview(ResWeb): - def __init__(self, host, queue=None, start=0): - self._queue = queue - self._start = start - super(Overview, self).__init__(host) - - def queue(self): - return self._queue - - def queues(self): - queues = [] - for q in sorted(self.resq.queues()): - queues.append({ - 'queue': q, - 'size': str(self.resq.size(q)), - }) - return queues - - def start(self): - return str(self._start) - - def end(self): - return str(self._start + 20) - - def size(self): - return 
str(self.resq.size(self._queue)) - - def jobs(self): - jobs = [] - for job in self.resq.peek(self._queue, self._start, self._start+20): - jobs.append({ - 'class':job['class'], - 'args':','.join(job['args']) - }) - return jobs - - def empty_jobs(self): - return len(self.jobs()) == 0 - - def empty(self): - return not self._queue - - def fail_count(self): - #from pyres.failure import Failure - return str(failure.count(self.resq)) - - def workers(self): - workers = [] - for w in self.resq.working(): - data = w.processing() - host,pid,queues = str(w).split(':') - item = { - 'state':w.state(), - 'host': host, - 'pid':pid, - 'w':str(w) - } - item['queue'] = w.job().get('queue') - if 'queue' in data: - item['data'] = True - item['code'] = data['payload']['class'] - item['runat'] = str(datetime.datetime.fromtimestamp(float(data['run_at']))) - else: - item['data'] = False - item['nodata'] = not item['data'] - workers.append(item) - return workers - def worker_size(self): - return str(len(self.workers())) - - def total_workers(self): - return str(len(Wrkr.all(self.resq))) - - def empty_workers(self): - if len(self.workers()): - return False - else: - return True - -class Queues(Overview): - template_name = 'queue_full' - -class Working(Overview): - template_name = 'working_full' - -class Workers(ResWeb): - def size(self): - return str(len(self.all())) - - def all(self): - return Wrkr.all(self.resq) - - def workers(self): - workers = [] - for w in self.all(): - data = w.processing() - host,pid,queues = str(w).split(':') - item = { - 'state':w.state(), - 'host': host, - 'pid':pid, - 'w':str(w) - } - qs = [] - for q in queues.split(','): - qs.append({ - 'q':str(q) - }) - item['queues'] = qs - if 'queue' in data: - item['data'] = True - item['code'] = data['payload']['class'] - item['runat'] = str(datetime.datetime.fromtimestamp(float(data['run_at']))) - else: - item['data'] = False - item['nodata'] = not item['data'] - workers.append(item) - return workers - -class 
Queue(ResWeb): - def __init__(self, host, key, start=0): - self.key = key - self._start = start - super(Queue, self).__init__(host) - - def start(self): - return str(self._start) - - def end(self): - end = self._start + 20 - if end > int(self.size()): - end = self.size() - return str(end) - - def queue(self): - return self.key - - def size(self): - return str(self.resq.size(self.key) or 0) - - def jobs(self): - jobs = [] - for job in self.resq.peek(self.key, self._start, self._start+20): - jobs.append({ - 'class':job['class'], - 'args': str(job['args']) - }) - return jobs - - def pagination(self): - return self.pages(self._start, int(self.size()), self.link_func) - - def link_func(self, start): - return '/queues/%s/?start=%s' % (self.key, start) - - -class Failed(ResWeb): - def __init__(self, host, start=0): - self._start = start - self.host = host - super(Failed, self).__init__(host) - - def start(self): - return str(self._start) - - def end(self): - return str(self._start + 20) - - def size(self): - return str(failure.count(self.resq) or 0) - - def failed_jobs(self): - jobs = [] - for job in failure.all(self.resq, self._start, self._start + 20): - backtrace = job['backtrace'] - - if isinstance(backtrace, list): - backtrace = '\n'.join(backtrace) - - item = job - item['failed_at'] = job['failed_at'] - item['worker_url'] = '/workers/%s/' % job['worker'] - item['payload_args'] = str(job['payload']['args'])[:1024] - item['payload_class'] = job['payload']['class'] - item['traceback'] = backtrace - jobs.append(item) - - return jobs - - def pagination(self): - return self.pages(self._start, int(self.size()), self.link_func) - - def link_func(self, start): - return '/failed/?start=%s' % start - -class Stats(ResWeb): - def __init__(self, host, key_id): - self.key_id = key_id - super(Stats, self).__init__(host) - - def sub_nav(self): - sub_nav = [] - sub_nav.append({ - 'section':'stats', - 'subtab':'resque' - }) - sub_nav.append({ - 'section':'stats', - 'subtab':'redis' - 
}) - sub_nav.append({ - 'section':'stats', - 'subtab':'keys' - }) - return sub_nav - - def title(self): - if self.key_id == 'resque': - return 'Pyres' - elif self.key_id == 'redis': - return '%s:%s' % (self.resq.host,self.resq.port) - elif self.key_id == 'keys': - return 'Keys owned by Pyres' - else: - return '' - - def stats(self): - if self.key_id == 'resque': - return self.resque_info() - elif self.key_id == 'redis': - return self.redis_info() - elif self.key_id == 'keys': - return self.key_info() - else: - return [] - - def resque_info(self): - stats = [] - for key, value in self.resq.info().items(): - stats.append({ - 'key':str(key), - 'value': str(value) - }) - return stats - - def redis_info(self): - stats = [] - for key, value in self.resq.redis.info().items(): - stats.append({ - 'key':str(key), - 'value': str(value) - }) - return stats - def key_info(self): - stats = [] - for key in self.resq.keys(): - - stats.append({ - 'key': str(key), - 'type': str(self.resq.redis.type('resque:'+key)), - 'size': str(redis_size(key, self.resq)) - }) - return stats - def standard(self): - return not self.resque_keys() - - def resque_keys(self): - if self.key_id == 'keys': - return True - return False - -class Stat(ResWeb): - def __init__(self, host, stat_id): - self.stat_id = stat_id - super(Stat, self).__init__(host) - - def key(self): - return str(self.stat_id) - - def key_type(self): - return str(self.resq.redis.type('resque:'+ str(self.stat_id))) - - def items(self): - items = [] - if self.key_type() == 'list': - lst = self.resq.redis.lrange('resque:'+self.stat_id,0,20) or [] - for k in lst: - items.append({ - 'row':str(k) - }) - elif self.key_type() == 'set': - st = self.resq.redis.smembers('resque:'+self.stat_id) or set([]) - for k in st: - items.append({ - 'row':str(k) - }) - elif self.key_type() == 'string': - items.append({ - 'row':str(self.resq.redis.get('resque:'+self.stat_id)) - }) - return items - - def size(self): - return redis_size(self.stat_id,self.resq) 
- -class Worker(ResWeb): - def __init__(self, host, worker_id): - self.worker_id = worker_id - super(Worker, self).__init__(host) - self._worker = Wrkr.find(worker_id, self.resq) - - def worker(self): - return str(self.worker_id) - - def host(self): - host,pid,queues = str(self.worker_id).split(':') - return str(host) - def pid(self): - host,pid,queues = str(self.worker_id).split(':') - return str(pid) - - def state(self): - return str(self._worker.state()) - - def started_at(self): - return str(self._worker.started) - - def queues(self): - host,pid,queues = str(self.worker_id).split(':') - qs = [] - for q in queues.split(','): - qs.append({ - 'q':str(q) - }) - return qs - - def processed(self): - return str(self._worker.get_processed()) - - def failed(self): - return str(self._worker.get_failed()) - - def data(self): - data = self._worker.processing() - return 'queue' in data - - def nodata(self): - return not self.data() - - def code(self): - data = self._worker.processing() - if self.data(): - return str(data['payload']['class']) - return '' - - def runat(self): - data = self._worker.processing() - if self.data(): - return str(datetime.datetime.fromtimestamp(float(data['run_at']))) - return '' - - """ - item = { - 'state':w.state(), - 'host': host, - 'pid':pid, - 'w':str(w) - } - qs = [] - for q in queues.split(','): - qs.append({ - 'q':str(q) - }) - item['queues'] = qs - if data.has_key('queue'): - item['data'] = True - item['code'] = data['payload']['class'] - item['runat'] = data['run_at'] - else: - item['data'] = False - item['nodata'] = not item['data'] - """ - pass - -class Delayed(ResWeb): - def __init__(self, host, start=0): - self._start = start - super(Delayed, self).__init__(host) - - def start(self): - return str(self._start) - - def end(self): - return str(self._start + 20) - - def size(self): - item = self.resq.delayed_queue_schedule_size() or 0 - return str(item) - - def jobs(self): - jobs = [] - for timestamp in 
self.resq.delayed_queue_peek(self.start(), self.end()): - t = datetime.datetime.fromtimestamp(float(timestamp)) - item = dict(timestamp=str(timestamp)) - item['size'] = str(self.resq.delayed_timestamp_size(timestamp)) - - item['formated_time'] = str(t) - - jobs.append(item) - return jobs - - def pagination(self): - return self.pages(self._start, int(self.size()), self.link_func) - - def link_func(self, start): - return '/delayed/?start=%s' % start - -class DelayedTimestamp(ResWeb): - def __init__(self, host, timestamp, start=0): - self._start = start - self._timestamp = timestamp - super(DelayedTimestamp, self).__init__(host) - - def formated_timestamp(self): - return str(datetime.datetime.fromtimestamp(float(self._timestamp))) - - def start(self): - return str(self._start) - - def end(self): - return str(self._start + 20) - - def size(self): - item = self.resq.delayed_timestamp_size(self._timestamp) or 0 - return str(item) - - def jobs(self): - jobs = [] - for job in self.resq.delayed_timestamp_peek(self._timestamp, int(self.start()), int(self.end())): - item = { - 'class': str(job['class']), - 'args': str(job['args']) - } - jobs.append(item) - return jobs - - def no_jobs(self): - if int(self.size()) > 0: - return False - return True - - def pagination(self): - return self.pages(self._start, int(self.size()), self.link_func) - - def link_func(self, start): - return '/delayed/?start=%s' % start - -def redis_size(key, resq): - key_type = resq.redis.type('resque:'+key) - item = 0 - if key_type == 'list': - item = resq.redis.llen('resque:'+key) - elif key_type == 'set': - item = resq.redis.scard('resque:'+key) - elif key_type == 'string': - item = 1 - return str(item) From 605ced0f969c29311058462bd0a5ce7e68afdadf Mon Sep 17 00:00:00 2001 From: Matt George Date: Fri, 1 Jun 2012 21:04:48 -0500 Subject: [PATCH 022/102] updating version and manifest --- MANIFEST.in | 1 - pyres/__init__.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/MANIFEST.in 
b/MANIFEST.in index 326432f..e69de29 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1 +0,0 @@ -recursive-include resweb *.mustache media/* diff --git a/pyres/__init__.py b/pyres/__init__.py index 95799c8..0ad5dcc 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -1,4 +1,4 @@ -__version__ = '1.2' +__version__ = '1.3' from redis import Redis import pyres.json_parser as json From e57a20f4bdc6be1660e0b4b9830d8e8bd21d008a Mon Sep 17 00:00:00 2001 From: Matt George Date: Fri, 1 Jun 2012 21:29:15 -0500 Subject: [PATCH 023/102] temporarily removing pypy --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index d000dd9..c8a616d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,7 +2,7 @@ language: python python: - "2.6" - "2.7" - - "pypy" +# - "pypy" # command to install dependencies install: - pip install -r requirements-test.txt --use-mirrors From 1db76b83e43ba97082969bea07d7f3cf1341f54c Mon Sep 17 00:00:00 2001 From: Matt George Date: Fri, 1 Jun 2012 21:33:06 -0500 Subject: [PATCH 024/102] removing resweb script --- pyres/scripts.py | 39 --------------------------------------- 1 file changed, 39 deletions(-) diff --git a/pyres/scripts.py b/pyres/scripts.py index 7302b82..55c0854 100644 --- a/pyres/scripts.py +++ b/pyres/scripts.py @@ -2,12 +2,9 @@ from optparse import OptionParser -from itty import run_itty - from pyres.horde import Khan from pyres import setup_logging, setup_pidfile from pyres.scheduler import Scheduler -from resweb import server as resweb_server from pyres.worker import Worker @@ -60,42 +57,6 @@ def pyres_scheduler(): Scheduler.run(server) -def pyres_web(): - usage = "usage: %prog [options]" - parser = OptionParser(usage) - parser.add_option("--host", - dest="host", - default="localhost", - metavar="HOST") - parser.add_option("--port", - dest="port", - type="int", - default=8080) - parser.add_option("--dsn", - dest="dsn", - help="Redis server to display") - parser.add_option("--auth", - 
dest="auth", - help="Redis user:pass") - parser.add_option("--server", - dest="server", - help="Server for itty to run under.", - default='wsgiref') - (options,args) = parser.parse_args() - - if options.dsn: - from pyres import ResQ - if options.auth is not None: - from redis import Redis - rhost, rport = options.dsn.split(':') - ruser, rpass = options.auth.split(':') - redis = Redis(host=rhost, port=int(rport), db=ruser, password=rpass) - resweb_server.HOST = ResQ(redis) - else: - resweb_server.HOST = ResQ(options.dsn) - run_itty(host=options.host, port=options.port, server=options.server) - - def pyres_worker(): usage = "usage: %prog [options] arg1" parser = OptionParser(usage=usage) From 58079c5d19cf5e505c68403f1991a4c9d513d6bc Mon Sep 17 00:00:00 2001 From: Matt George Date: Sat, 2 Jun 2012 09:40:27 -0500 Subject: [PATCH 025/102] getting ready for new release --- HISTORY.md | 1 + setup.py | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index e8e8c85..679f19b 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,5 +1,6 @@ ##1.3 (2012-06-01) * remove resweb from pyres +* resweb is now available at http://github.com/Pyres/resweb or on pypi ##1.2 * release with changes from pull requests diff --git a/setup.py b/setup.py index fdd19fe..53c095b 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,6 @@ from setuptools import setup, find_packages -version='1.3' - +version=__import__('pyres').__version__ setup( name='pyres', version=version, From 5dd360ad5aa3c45c0a41de2426c9103bc3e07d94 Mon Sep 17 00:00:00 2001 From: Matt George Date: Sat, 2 Jun 2012 10:02:51 -0500 Subject: [PATCH 026/102] fixing setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 53c095b..256a0f9 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ from setuptools import setup, find_packages -version=__import__('pyres').__version__ +version='1.3' setup( name='pyres', version=version, From 
7103f8aa47620b639568b321fd41aa710c6b0b8c Mon Sep 17 00:00:00 2001 From: Matt George Date: Sun, 3 Jun 2012 21:31:22 -0500 Subject: [PATCH 027/102] fixing failing tests --- tests/test_failure.py | 2 +- tests/test_jobs.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_failure.py b/tests/test_failure.py index c8c9539..04e538f 100644 --- a/tests/test_failure.py +++ b/tests/test_failure.py @@ -54,4 +54,4 @@ def test_requeue(self): mod_with_class = '{module}.{klass}'.format( module=self.job_class.__module__, klass=self.job_class.__name__) - assert job._payload == {'class':mod_with_class,'args':['test1']} + self.assertEqual(job._payload, {'class':mod_with_class,'args':['test1'],'enqueue_timestamp': job.enqueue_timestamp}) diff --git a/tests/test_jobs.py b/tests/test_jobs.py index a936c1a..8f2fdc3 100644 --- a/tests/test_jobs.py +++ b/tests/test_jobs.py @@ -7,7 +7,7 @@ def test_reserve(self): job = Job.reserve('basic', self.resq) assert job._queue == 'basic' assert job._payload - assert job._payload == {'class':'tests.Basic','args':['test1']} + self.assertEqual(job._payload, {'class':'tests.Basic','args':['test1'],'enqueue_timestamp':job.enqueue_timestamp}) def test_perform(self): self.resq.enqueue(Basic,"test1") From f475dcb266cecd55d0f9669ce17e0a618f8635c2 Mon Sep 17 00:00:00 2001 From: Matt George Date: Sun, 3 Jun 2012 21:36:59 -0500 Subject: [PATCH 028/102] tweaking before and after perform methods --- pyres/job.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pyres/job.py b/pyres/job.py index dd20c76..242fa53 100644 --- a/pyres/job.py +++ b/pyres/job.py @@ -73,14 +73,16 @@ def perform(self): metadata["enqueue_timestamp"] = self.enqueue_timestamp before_perform = getattr(payload_class, "before_perform", None) - if before_perform: - before_perform(metadata) metadata["failed"] = False metadata["perform_timestamp"] = time.time() + check_after = True try: + if before_perform: + before_perform(metadata) return 
payload_class.perform(*args) except: + check_after = False metadata["failed"] = True if not self.retry(payload_class, args): metadata["retried"] = False @@ -89,7 +91,7 @@ def perform(self): metadata["retried"] = True finally: after_perform = getattr(payload_class, "after_perform", None) - if after_perform: + if after_perform and check_after: after_perform(metadata) def fail(self, exception): From 7f88c1395b676f41b5ea98f2478afe35f46a2850 Mon Sep 17 00:00:00 2001 From: Matt George Date: Wed, 6 Jun 2012 19:27:40 -0500 Subject: [PATCH 029/102] adding payload_class to both before and after perform --- pyres/job.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pyres/job.py b/pyres/job.py index 242fa53..243f86c 100644 --- a/pyres/job.py +++ b/pyres/job.py @@ -79,7 +79,7 @@ def perform(self): check_after = True try: if before_perform: - before_perform(metadata) + before_perform(payload_class, metadata) return payload_class.perform(*args) except: check_after = False @@ -92,7 +92,8 @@ def perform(self): finally: after_perform = getattr(payload_class, "after_perform", None) if after_perform and check_after: - after_perform(metadata) + after_perform(payload_class, metadata) + delattr(payload_class,'resq') def fail(self, exception): """This method provides a way to fail a job and will use whatever From 2166c708208f96527a7f8db8361568cd2480bbbd Mon Sep 17 00:00:00 2001 From: Matt George Date: Wed, 6 Jun 2012 19:58:46 -0500 Subject: [PATCH 030/102] updating theme --- docs/source/_theme/flask/layout.html | 25 ++ docs/source/_theme/flask/relations.html | 19 + docs/source/_theme/flask/static/flasky.css_t | 395 ++++++++++++++++++ .../_theme/flask/static/small_flask.css | 70 ++++ docs/source/_theme/flask/theme.conf | 9 + docs/source/conf.py | 8 +- 6 files changed, 522 insertions(+), 4 deletions(-) create mode 100644 docs/source/_theme/flask/layout.html create mode 100644 docs/source/_theme/flask/relations.html create mode 100644 
docs/source/_theme/flask/static/flasky.css_t create mode 100644 docs/source/_theme/flask/static/small_flask.css create mode 100644 docs/source/_theme/flask/theme.conf diff --git a/docs/source/_theme/flask/layout.html b/docs/source/_theme/flask/layout.html new file mode 100644 index 0000000..5caa4e2 --- /dev/null +++ b/docs/source/_theme/flask/layout.html @@ -0,0 +1,25 @@ +{%- extends "basic/layout.html" %} +{%- block extrahead %} + {{ super() }} + {% if theme_touch_icon %} + + {% endif %} + +{% endblock %} +{%- block relbar2 %}{% endblock %} +{% block header %} + {{ super() }} + {% if pagename == 'index' %} +
    + {% endif %} +{% endblock %} +{%- block footer %} + + {% if pagename == 'index' %} +
    + {% endif %} +{%- endblock %} diff --git a/docs/source/_theme/flask/relations.html b/docs/source/_theme/flask/relations.html new file mode 100644 index 0000000..3bbcde8 --- /dev/null +++ b/docs/source/_theme/flask/relations.html @@ -0,0 +1,19 @@ +

    Related Topics

    + diff --git a/docs/source/_theme/flask/static/flasky.css_t b/docs/source/_theme/flask/static/flasky.css_t new file mode 100644 index 0000000..b5ca39b --- /dev/null +++ b/docs/source/_theme/flask/static/flasky.css_t @@ -0,0 +1,395 @@ +/* + * flasky.css_t + * ~~~~~~~~~~~~ + * + * :copyright: Copyright 2010 by Armin Ronacher. + * :license: Flask Design License, see LICENSE for details. + */ + +{% set page_width = '940px' %} +{% set sidebar_width = '220px' %} + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: 'Georgia', serif; + font-size: 17px; + background-color: white; + color: #000; + margin: 0; + padding: 0; +} + +div.document { + width: {{ page_width }}; + margin: 30px auto 0 auto; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 {{ sidebar_width }}; +} + +div.sphinxsidebar { + width: {{ sidebar_width }}; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.body { + background-color: #ffffff; + color: #3E4349; + padding: 0 30px 0 30px; +} + +img.floatingflask { + padding: 0 0 10px 10px; + float: right; +} + +div.footer { + width: {{ page_width }}; + margin: 20px auto 30px auto; + font-size: 14px; + color: #888; + text-align: right; +} + +div.footer a { + color: #888; +} + +div.related { + display: none; +} + +div.sphinxsidebar a { + color: #444; + text-decoration: none; + border-bottom: 1px dotted #999; +} + +div.sphinxsidebar a:hover { + border-bottom: 1px solid #999; +} + +div.sphinxsidebar { + font-size: 14px; + line-height: 1.5; +} + +div.sphinxsidebarwrapper { + padding: 18px 10px; +} + +div.sphinxsidebarwrapper p.logo { + padding: 0 0 20px 0; + margin: 0; + text-align: center; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: 'Garamond', 'Georgia', serif; + color: #444; + font-size: 24px; + font-weight: normal; + margin: 0 0 5px 0; + padding: 0; +} + +div.sphinxsidebar h4 { + font-size: 20px; +} + 
+div.sphinxsidebar h3 a { + color: #444; +} + +div.sphinxsidebar p.logo a, +div.sphinxsidebar h3 a, +div.sphinxsidebar p.logo a:hover, +div.sphinxsidebar h3 a:hover { + border: none; +} + +div.sphinxsidebar p { + color: #555; + margin: 10px 0; +} + +div.sphinxsidebar ul { + margin: 10px 0; + padding: 0; + color: #000; +} + +div.sphinxsidebar input { + border: 1px solid #ccc; + font-family: 'Georgia', serif; + font-size: 1em; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #004B6B; + text-decoration: underline; +} + +a:hover { + color: #6D4100; + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: 'Garamond', 'Georgia', serif; + font-weight: normal; + margin: 30px 0px 10px 0px; + padding: 0; +} + +{% if theme_index_logo %} +div.indexwrapper h1 { + text-indent: -999999px; + background: url({{ theme_index_logo }}) no-repeat center center; + height: {{ theme_index_logo_height }}; +} +{% endif %} + +div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } +div.body h2 { font-size: 180%; } +div.body h3 { font-size: 150%; } +div.body h4 { font-size: 130%; } +div.body h5 { font-size: 100%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #ddd; + padding: 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + color: #444; + background: #eaeaea; +} + +div.body p, div.body dd, div.body li { + line-height: 1.4em; +} + +div.admonition { + background: #fafafa; + margin: 20px -30px; + padding: 10px 30px; + border-top: 1px solid #ccc; + border-bottom: 1px solid #ccc; +} + +div.admonition tt.xref, div.admonition a tt { + border-bottom: 1px solid #fafafa; +} + +dd div.admonition { + margin-left: -60px; + padding-left: 60px; +} + +div.admonition p.admonition-title { + font-family: 'Garamond', 'Georgia', serif; + font-weight: normal; + font-size: 24px; + margin: 0 0 10px 0; + padding: 0; + line-height: 1; +} + 
+div.admonition p.last { + margin-bottom: 0; +} + +div.highlight { + background-color: white; +} + +dt:target, .highlight { + background: #FAF3E8; +} + +div.note { + background-color: #eee; + border: 1px solid #ccc; +} + +div.seealso { + background-color: #ffc; + border: 1px solid #ff6; +} + +div.topic { + background-color: #eee; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre, tt { + font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; + font-size: 0.9em; +} + +img.screenshot { +} + +tt.descname, tt.descclassname { + font-size: 0.95em; +} + +tt.descname { + padding-right: 0.08em; +} + +img.screenshot { + -moz-box-shadow: 2px 2px 4px #eee; + -webkit-box-shadow: 2px 2px 4px #eee; + box-shadow: 2px 2px 4px #eee; +} + +table.docutils { + border: 1px solid #888; + -moz-box-shadow: 2px 2px 4px #eee; + -webkit-box-shadow: 2px 2px 4px #eee; + box-shadow: 2px 2px 4px #eee; +} + +table.docutils td, table.docutils th { + border: 1px solid #888; + padding: 0.25em 0.7em; +} + +table.field-list, table.footnote { + border: none; + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + +table.footnote { + margin: 15px 0; + width: 100%; + border: 1px solid #eee; + background: #fdfdfd; + font-size: 0.9em; +} + +table.footnote + table.footnote { + margin-top: -15px; + border-top: none; +} + +table.field-list th { + padding: 0 0.8em 0 0; +} + +table.field-list td { + padding: 0; +} + +table.footnote td.label { + width: 0px; + padding: 0.3em 0 0.3em 0.5em; +} + +table.footnote td { + padding: 0.3em 0.5em; +} + +dl { + margin: 0; + padding: 0; +} + +dl dd { + margin-left: 30px; +} + +blockquote { + margin: 0 0 0 30px; + padding: 0; +} + +ul, ol { + margin: 10px 0 10px 30px; + padding: 0; +} + +pre { + background: #eee; + padding: 7px 30px; + margin: 15px -30px; + line-height: 1.3em; +} + +dl pre, blockquote pre, li pre { + margin-left: -60px; + padding-left: 60px; +} + 
+dl dl pre { + margin-left: -90px; + padding-left: 90px; +} + +tt { + background-color: #ecf0f3; + color: #222; + /* padding: 1px 2px; */ +} + +tt.xref, a tt { + background-color: #FBFBFB; + border-bottom: 1px solid white; +} + +a.reference { + text-decoration: none; + border-bottom: 1px dotted #004B6B; +} + +a.reference:hover { + border-bottom: 1px solid #6D4100; +} + +a.footnote-reference { + text-decoration: none; + font-size: 0.7em; + vertical-align: top; + border-bottom: 1px dotted #004B6B; +} + +a.footnote-reference:hover { + border-bottom: 1px solid #6D4100; +} + +a:hover tt { + background: #EEE; +} diff --git a/docs/source/_theme/flask/static/small_flask.css b/docs/source/_theme/flask/static/small_flask.css new file mode 100644 index 0000000..1c6df30 --- /dev/null +++ b/docs/source/_theme/flask/static/small_flask.css @@ -0,0 +1,70 @@ +/* + * small_flask.css_t + * ~~~~~~~~~~~~~~~~~ + * + * :copyright: Copyright 2010 by Armin Ronacher. + * :license: Flask Design License, see LICENSE for details. 
+ */ + +body { + margin: 0; + padding: 20px 30px; +} + +div.documentwrapper { + float: none; + background: white; +} + +div.sphinxsidebar { + display: block; + float: none; + width: 102.5%; + margin: 50px -30px -20px -30px; + padding: 10px 20px; + background: #333; + color: white; +} + +div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, +div.sphinxsidebar h3 a { + color: white; +} + +div.sphinxsidebar a { + color: #aaa; +} + +div.sphinxsidebar p.logo { + display: none; +} + +div.document { + width: 100%; + margin: 0; +} + +div.related { + display: block; + margin: 0; + padding: 10px 0 20px 0; +} + +div.related ul, +div.related ul li { + margin: 0; + padding: 0; +} + +div.footer { + display: none; +} + +div.bodywrapper { + margin: 0; +} + +div.body { + min-height: 0; + padding: 0; +} diff --git a/docs/source/_theme/flask/theme.conf b/docs/source/_theme/flask/theme.conf new file mode 100644 index 0000000..18c720f --- /dev/null +++ b/docs/source/_theme/flask/theme.conf @@ -0,0 +1,9 @@ +[theme] +inherit = basic +stylesheet = flasky.css +pygments_style = flask_theme_support.FlaskyStyle + +[options] +index_logo = '' +index_logo_height = 120px +touch_icon = diff --git a/docs/source/conf.py b/docs/source/conf.py index 6962aa8..2a8c365 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -39,7 +39,7 @@ # General information about the project. project = u'pyres' -copyright = u'2010, Matt George' +copyright = u'2012, Matt George' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -92,7 +92,9 @@ # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'default' +sys.path.append(os.path.abspath('_theme')) +html_theme_path = ['_theme'] +html_theme = 'flask' # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the @@ -194,5 +196,3 @@ # If false, no module index is generated. #latex_use_modindex = True -html_theme = "nature" -html_theme_path = ["_theme"] \ No newline at end of file From df7fe42c3ae9c2d485e933f90fa794602bb9b344 Mon Sep 17 00:00:00 2001 From: Matt George Date: Wed, 6 Jun 2012 19:59:05 -0500 Subject: [PATCH 031/102] tweaking job docs --- docs/source/class.rst | 4 ++-- pyres/job.py | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/docs/source/class.rst b/docs/source/class.rst index 7782ae8..6121909 100644 --- a/docs/source/class.rst +++ b/docs/source/class.rst @@ -10,7 +10,7 @@ Job Classes ================= .. autoclass:: pyres.job.Job - :members: + :members: Worker Classes ================= @@ -25,4 +25,4 @@ Failure Classes :members: .. autoclass:: pyres.failure.RedisBackend - :members: \ No newline at end of file + :members: diff --git a/pyres/job.py b/pyres/job.py index 243f86c..88e8ae7 100644 --- a/pyres/job.py +++ b/pyres/job.py @@ -60,7 +60,6 @@ def perform(self): called regardless of whether an exception is ultimately thrown by the perform method. - #@ add entry_point loading """ payload_class_str = self._payload["class"] @@ -106,6 +105,11 @@ def fail(self, exception): return fail def retry(self, payload_class, args): + """This method provides a way to retry a job after a failure. + If the jobclass defined by the payload containes a ``retry_every`` attribute then pyres + will attempt to retry the job until successful or until timeout defined by ``retry_timeout`` on the payload class. 
+ + """ retry_every = getattr(payload_class, 'retry_every', None) retry_timeout = getattr(payload_class, 'retry_timeout', 0) From a907e5ce421848854ba004fc0341af9151db3151 Mon Sep 17 00:00:00 2001 From: Matt George Date: Wed, 6 Jun 2012 20:03:52 -0500 Subject: [PATCH 032/102] removing version import --- docs/source/conf.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 2a8c365..545ed07 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -12,7 +12,6 @@ # serve to show the default. import sys, os -from pyres import __version__ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the @@ -46,9 +45,9 @@ # built documents. # # The short X.Y version. -version = __version__ +version = '1.3' # The full version, including alpha/beta/rc tags. -release = __version__ +release = '1.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
From 926bd283bf792a229aab82c8ff7313967978463c Mon Sep 17 00:00:00 2001 From: Christy O'Reilly Date: Fri, 15 Jun 2012 11:08:07 +0100 Subject: [PATCH 033/102] Use enqueue from string as a base for enqueue --- pyres/__init__.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index fbf5e09..20207c4 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -212,17 +212,12 @@ def enqueue(self, klass, *args): queue = getattr(klass,'queue', None) if queue: class_name = '%s.%s' % (klass.__module__, klass.__name__) - self.push(queue, {'class':class_name,'args':args}) - logging.info("enqueued '%s' job on queue %s" % (class_name, queue)) - if args: - logging.debug("job arguments: %s" % str(args)) - else: - logging.debug("no arguments passed in.") + self.enqueue_from_string(class_name, queue, *args) else: logging.warning("unable to enqueue job with class %s" % str(klass)) def enqueue_from_string(self, klass_as_string, queue, *args, **kwargs): - payload = {'class':klass_as_string, 'queue': queue, 'args':args} + payload = {'class':klass_as_string, 'args':args} if 'first_attempt' in kwargs: payload['first_attempt'] = kwargs['first_attempt'] self.push(queue, payload) From 8fc4e5af151457f6dbaa9dd2ffe5aaf435922175 Mon Sep 17 00:00:00 2001 From: Christy O'Reilly Date: Fri, 15 Jun 2012 11:12:15 +0100 Subject: [PATCH 034/102] Add enqueue_at_from_string --- pyres/__init__.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index 20207c4..e879d19 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -281,11 +281,14 @@ def close(self): def enqueue_at(self, datetime, klass, *args, **kwargs): class_name = '%s.%s' % (klass.__module__, klass.__name__) + self.enqueue_at_from_string(datetime, class_name, klass.queue, *args, **kwargs) + + def enqueue_at_from_string(self, datetime, klass_as_string, queue, *args, **kwargs): logging.info("scheduled '%s' job on queue %s 
for execution at %s" % - (class_name, klass.queue, datetime)) + (klass_as_string, queue, datetime)) if args: logging.debug("job arguments are: %s" % str(args)) - payload = {'class':class_name, 'queue': klass.queue, 'args':args} + payload = {'class': klass_as_string, 'queue': queue, 'args': args} if 'first_attempt' in kwargs: payload['first_attempt'] = kwargs['first_attempt'] self.delayed_push(datetime, payload) From 9f0c177ff1bf069085d5cb35a18de5d4480ecbe3 Mon Sep 17 00:00:00 2001 From: Matt George Date: Fri, 29 Jun 2012 19:51:25 -0500 Subject: [PATCH 035/102] starting to update HISTORY with new changes --- HISTORY.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/HISTORY.md b/HISTORY.md index 679f19b..1de9258 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,3 +1,5 @@ +##1.4 (2012-06-?) +* added hooks for before and after perform methods ##1.3 (2012-06-01) * remove resweb from pyres * resweb is now available at http://github.com/Pyres/resweb or on pypi From 3a50d9ac3f612c3b080f81da65cf0bc7d79f40f2 Mon Sep 17 00:00:00 2001 From: Matt George Date: Sun, 1 Jul 2012 10:00:14 -0500 Subject: [PATCH 036/102] updating authors list via git shortlog -s -n --- AUTHORS.md | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/AUTHORS.md b/AUTHORS.md index 68fc934..89cf71b 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -1,11 +1,24 @@ ## Authors * Matt George -* Chris Song -* yashh -* dsc -* Alex Ezell +* Craig Hawco * Michael Russo -* Whit Morris +* Chris Song +* Whit Morriss * Joe Shaw +* Yashwanth Nelapati +* Cezar Sa Espinola +* Alex Ezell +* Christy O'Reilly +* Kevin McConnell +* Bernardo Heynemann +* David Schoonover +* Rob Hudson +* Salimane Adjao Moustapha +* John Hobbs +* James M. 
Henderson +* Iraê Carvalho +* Fabien Reboia +* Peter Teichman + Inspired by Resque, by Chris Wanstrath From 982c58cf8d3033836edcf458f3d13422574bc5ea Mon Sep 17 00:00:00 2001 From: Joe Shaw Date: Thu, 19 Jan 2012 23:41:06 -0500 Subject: [PATCH 037/102] improve some logging, particularly errors that get retried --- pyres/job.py | 2 ++ pyres/worker.py | 14 +++++++------- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/pyres/job.py b/pyres/job.py index 88e8ae7..72c4961 100644 --- a/pyres/job.py +++ b/pyres/job.py @@ -1,3 +1,4 @@ +import logging import time from datetime import timedelta from pyres import ResQ, safe_str_to_class @@ -88,6 +89,7 @@ def perform(self): raise else: metadata["retried"] = True + logging.exception("Retry scheduled after error in %s", job) finally: after_perform = getattr(payload_class, "after_perform", None) if after_perform and check_after: diff --git a/pyres/worker.py b/pyres/worker.py index be84bc5..ca8d8cb 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -22,9 +22,9 @@ class and passes a comma-separated list of queues to listen on.:: >>> Worker.run([queue1, queue2], server="localhost:6379") """ - + job_class = Job - + def __init__(self, queues=(), server="localhost:6379", password=None, timeout=None): self.queues = queues self.validate_queues() @@ -209,10 +209,10 @@ def fork_worker(self, job): logger.debug('done waiting') else: self._setproctitle("Processing %s since %s" % - (job._queue, + (job, datetime.datetime.now())) logger.info('Processing %s since %s' % - (job._queue, datetime.datetime.now())) + (job, datetime.datetime.now())) self.after_fork(job) # re-seed the Python PRNG after forking, otherwise @@ -263,7 +263,7 @@ def process(self, job=None): logger.info('completed job') logger.debug('job details: %s' % job) finally: - self.done_working() + self.done_working(job) def _handle_job_exception(self, job): exceptionType, exceptionValue, exceptionTraceback = sys.exc_info() @@ -290,8 +290,8 @@ def working_on(self, job): 
logger.debug("worker:%s" % str(self)) logger.debug(self.resq.redis["resque:worker:%s" % str(self)]) - def done_working(self): - logger.info('done working') + def done_working(self, job): + logger.info('done working on %s', job) self.processed() self.resq.redis.delete("resque:worker:%s" % str(self)) From fb5b97a7730f5e9ced111634ab5653397cbb754a Mon Sep 17 00:00:00 2001 From: Joe Shaw Date: Thu, 19 Jan 2012 23:56:09 -0500 Subject: [PATCH 038/102] add PID to pyres logging format --- pyres/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index e1ca373..1956e4c 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -20,7 +20,7 @@ def get_logging_handler(filename, procname, namespace=None): message_format = namespace + ': %(message)s' else: message_format = '%(message)s' - format = '%(asctime)s %(levelname)-8s ' + message_format + format = '%(asctime)s %(process)5d %(levelname)-8s ' + message_format if not filename: filename = "stderr" From b21524d05065bf24c22b185839b79ecf267e2181 Mon Sep 17 00:00:00 2001 From: Joe Shaw Date: Fri, 20 Jan 2012 13:36:00 -0500 Subject: [PATCH 039/102] dial down the logging a bit It is overwhelmingly the noisiest thing in our log files --- pyres/scheduler.py | 2 +- pyres/worker.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pyres/scheduler.py b/pyres/scheduler.py index 37907e1..af5a4d6 100644 --- a/pyres/scheduler.py +++ b/pyres/scheduler.py @@ -66,7 +66,7 @@ def next_item(self, timestamp): def handle_delayed_items(self): for timestamp in self.next_timestamp(): _setproctitle('Handling timestamp %s' % timestamp) - logger.info('handling timestamp: %s' % timestamp) + logger.debug('handling timestamp: %s' % timestamp) for item in self.next_item(timestamp): logger.debug('queueing item %s' % item) klass = item['class'] diff --git a/pyres/worker.py b/pyres/worker.py index ca8d8cb..c90f320 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -130,6 
+130,7 @@ def work(self, interval=5): """ self._setproctitle("Starting") + logger.info("starting") self.startup() while True: @@ -260,7 +261,7 @@ def process(self, job=None): self._handle_job_exception(job) if not job_failed: - logger.info('completed job') + logger.debug('completed job') logger.debug('job details: %s' % job) finally: self.done_working(job) @@ -291,7 +292,7 @@ def working_on(self, job): logger.debug(self.resq.redis["resque:worker:%s" % str(self)]) def done_working(self, job): - logger.info('done working on %s', job) + logger.debug('done working on %s', job) self.processed() self.resq.redis.delete("resque:worker:%s" % str(self)) From b02437f622b22733184fddf6bcc5eaf9b8b70c02 Mon Sep 17 00:00:00 2001 From: Joe Shaw Date: Wed, 18 Jul 2012 11:32:14 -0400 Subject: [PATCH 040/102] require redis >= 2.4.12, not that version specifically --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1f870e9..b4cc583 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ simplejson==2.0.9 -redis==2.4.12 +redis>=2.4.12 setproctitle>=1.0 From b00338413e954e91c79c625dbaf5140d0a57f6e6 Mon Sep 17 00:00:00 2001 From: Joe Shaw Date: Wed, 18 Jul 2012 13:31:44 -0400 Subject: [PATCH 041/102] before_perform() & after_perform() should be called on class, not pass it in Fixes a problem where you would have: @classmethod def before_perform(cls, metadata): pass and you would get an exception: ... 
before_perform(payload_class, metadata) TypeError: before_perform() takes exactly 2 arguments (3 given) --- pyres/job.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyres/job.py b/pyres/job.py index 88e8ae7..52240d5 100644 --- a/pyres/job.py +++ b/pyres/job.py @@ -78,7 +78,7 @@ def perform(self): check_after = True try: if before_perform: - before_perform(payload_class, metadata) + payload_class.before_perform(metadata) return payload_class.perform(*args) except: check_after = False @@ -91,7 +91,7 @@ def perform(self): finally: after_perform = getattr(payload_class, "after_perform", None) if after_perform and check_after: - after_perform(payload_class, metadata) + payload_class.after_perform(metadata) delattr(payload_class,'resq') def fail(self, exception): From c86ca295467ac3b9934ad381f7067383a1b46682 Mon Sep 17 00:00:00 2001 From: Joe Shaw Date: Wed, 18 Jul 2012 13:24:07 -0400 Subject: [PATCH 042/102] pull setup.py install_requires from requirements.txt Instead of hard-coding it in both places. 
When I changed the redis requirement in requirements.txt I missed it in setup.py --- setup.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index 256a0f9..e33298a 100644 --- a/setup.py +++ b/setup.py @@ -20,10 +20,9 @@ pyres_worker=pyres.scripts:pyres_worker """, install_requires=[ - 'simplejson>=2.0.9', - 'redis==2.4.12', - 'setproctitle>=1.0' - ], + item for item in + open("requirements.txt").read().split("\n") + if item], classifiers = [ 'Development Status :: 4 - Beta', 'Environment :: Console', From a4a498ef11e4627acf05bca98f0e74565d209383 Mon Sep 17 00:00:00 2001 From: Joe Shaw Date: Wed, 18 Jul 2012 15:19:27 -0400 Subject: [PATCH 043/102] require at least simplesjon 2.0.9 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index b4cc583..05131d7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ -simplejson==2.0.9 +simplejson>=2.0.9 redis>=2.4.12 setproctitle>=1.0 From 1e2bf454ce53fc35b9af986fed17bcc0b9c9f0f3 Mon Sep 17 00:00:00 2001 From: Matt George Date: Wed, 18 Jul 2012 15:09:02 -0500 Subject: [PATCH 044/102] fixing broken tests --- pyres/job.py | 2 +- pyres/worker.py | 2 +- tests/test_worker.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pyres/job.py b/pyres/job.py index 72c4961..ded0eae 100644 --- a/pyres/job.py +++ b/pyres/job.py @@ -89,7 +89,7 @@ def perform(self): raise else: metadata["retried"] = True - logging.exception("Retry scheduled after error in %s", job) + logging.exception("Retry scheduled after error in %s", self._payload) finally: after_perform = getattr(payload_class, "after_perform", None) if after_perform and check_after: diff --git a/pyres/worker.py b/pyres/worker.py index c90f320..c7f15f4 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -205,7 +205,7 @@ def fork_worker(self, job): # If the child process' job called os._exit manually we need to # finish the clean up 
here. if self.job(): - self.done_working() + self.done_working(job) logger.debug('done waiting') else: diff --git a/tests/test_worker.py b/tests/test_worker.py index 6e35367..7bcf981 100644 --- a/tests/test_worker.py +++ b/tests/test_worker.py @@ -122,7 +122,7 @@ def test_get_job(self): name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') assert worker.job() == ResQ.decode(self.redis.get('resque:worker:%s' % name)) assert worker.processing() == ResQ.decode(self.redis.get('resque:worker:%s' % name)) - worker.done_working() + worker.done_working(job) w2 = Worker(['basic']) print w2.job() assert w2.job() == {} @@ -158,7 +158,7 @@ def test_state(self): job = Job.reserve('basic', self.resq) worker.working_on(job) assert worker.state() == 'working' - worker.done_working() + worker.done_working(job) assert worker.state() == 'idle' def test_prune_dead_workers(self): From e74b0f66a75d2af6fe841ef9f8f2147e68df16ad Mon Sep 17 00:00:00 2001 From: Matt George Date: Wed, 18 Jul 2012 15:28:24 -0500 Subject: [PATCH 045/102] using correct api from redis-py --- pyres/__init__.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index 1956e4c..27a0b65 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -140,9 +140,8 @@ class ResQ(object): """ def __init__(self, server="localhost:6379", password=None): + self.password = password self.redis = server - if password: - self.redis.auth(password) self._watched_queues = set() def push(self, queue, item): @@ -187,7 +186,7 @@ def _set_redis(self, server): if isinstance(server, basestring): self.dsn = server host, port = server.split(':') - self._redis = Redis(host=host, port=int(port)) + self._redis = Redis(host=host, port=int(port), password=self.password) self.host = host self.port = int(port) elif isinstance(server, Redis): From dab7cb864ab0c07d00023d9a7232d47815c3eac1 Mon Sep 17 00:00:00 2001 From: Matt George Date: Thu, 19 Jul 2012 14:33:01 -0500 Subject: [PATCH 046/102] 
updating for release --- HISTORY.md | 3 +++ pyres/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 1de9258..d23a370 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,5 +1,8 @@ ##1.4 (2012-06-?) * added hooks for before and after perform methods +* fixed logging +*fixed problems with password authentication + ##1.3 (2012-06-01) * remove resweb from pyres * resweb is now available at http://github.com/Pyres/resweb or on pypi diff --git a/pyres/__init__.py b/pyres/__init__.py index 27a0b65..cd37d0f 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -1,4 +1,4 @@ -__version__ = '1.3' +__version__ = '1.4' from redis import Redis import pyres.json_parser as json diff --git a/setup.py b/setup.py index 256a0f9..574cbad 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ from setuptools import setup, find_packages -version='1.3' +version='1.4' setup( name='pyres', version=version, From 046209d411c47ea438a7052cbdd9661412902683 Mon Sep 17 00:00:00 2001 From: Matt George Date: Mon, 30 Jul 2012 14:55:33 -0500 Subject: [PATCH 047/102] SIGINFO doesn't exist on linux --- pyres/horde.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyres/horde.py b/pyres/horde.py index 03be633..0b47047 100644 --- a/pyres/horde.py +++ b/pyres/horde.py @@ -217,7 +217,8 @@ def register_signal_handlers(self): signal.signal(signal.SIGQUIT, self.schedule_shutdown) signal.signal(signal.SIGUSR1, self.kill_child) signal.signal(signal.SIGUSR2, self.add_child) - signal.signal(signal.SIGINFO, self.current_state) + if hasattr(signal, 'SIGINFO'): + signal.signal(signal.SIGINFO, self.current_state) def current_state(self): tmap = {} From ec1877665cfb6783c1c506188712c91847ecf57c Mon Sep 17 00:00:00 2001 From: Matt George Date: Mon, 30 Jul 2012 14:58:52 -0500 Subject: [PATCH 048/102] 1.4.1 release --- HISTORY.md | 4 ++++ pyres/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) 
diff --git a/HISTORY.md b/HISTORY.md index d23a370..53e3ad1 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,3 +1,7 @@ +##1.4.1 (2012-07-30) +* fix for non existent system signal for linux +* cleanup of setup.py and requirements + ##1.4 (2012-06-?) * added hooks for before and after perform methods * fixed logging diff --git a/pyres/__init__.py b/pyres/__init__.py index cd37d0f..3cd6f93 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -1,4 +1,4 @@ -__version__ = '1.4' +__version__ = '1.4.1' from redis import Redis import pyres.json_parser as json diff --git a/setup.py b/setup.py index fd11236..baee3dd 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ from setuptools import setup, find_packages -version='1.4' +version='1.4.1' setup( name='pyres', version=version, From 9fd9b14f7b84e05f089f535ebf5740d5ce577514 Mon Sep 17 00:00:00 2001 From: Matt George Date: Mon, 30 Jul 2012 15:03:54 -0500 Subject: [PATCH 049/102] adding package data --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index baee3dd..e534abe 100644 --- a/setup.py +++ b/setup.py @@ -13,6 +13,7 @@ packages=find_packages(exclude=['ez_setup', 'examples', 'tests']), download_url='http://pypi.python.org/packages/source/p/pyres/pyres-%s.tar.gz' % version, include_package_data=True, + package_data={'': ['requirements.txt']}, entry_points = """\ [console_scripts] pyres_manager=pyres.scripts:pyres_manager From d54343e94963e208096799b50924435182a0e540 Mon Sep 17 00:00:00 2001 From: Matt George Date: Mon, 30 Jul 2012 15:11:40 -0500 Subject: [PATCH 050/102] adding requirements for sdist --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) diff --git a/MANIFEST.in b/MANIFEST.in index e69de29..f9bd145 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -0,0 +1 @@ +include requirements.txt From 2e48ebd378489fbdf7128f588e945e68f9fe2df4 Mon Sep 17 00:00:00 2001 From: Adam Meghji Date: Mon, 17 Sep 2012 18:28:13 -0400 Subject: [PATCH 051/102] Add support for Redis db num in server 
string: i.e. localhost:6379/0 --- pyres/__init__.py | 7 ++++--- pyres/worker.py | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index 3cd6f93..98a9e75 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -113,7 +113,7 @@ class ResQ(object): The ``__init__`` takes these keyword arguments: - ``server`` -- IP address and port of the Redis server to which you want to connect. Default is `localhost:6379`. + ``server`` -- IP address and port of the Redis server to which you want to connect, and optional Redis DB number. Default is `localhost:6379`. ``password`` -- The password, if required, of your Redis server. Default is "None". @@ -185,8 +185,9 @@ def _get_redis(self): def _set_redis(self, server): if isinstance(server, basestring): self.dsn = server - host, port = server.split(':') - self._redis = Redis(host=host, port=int(port), password=self.password) + address, _, db = server.partition('/') + host, port = address.split(':') + self._redis = Redis(host=host, port=int(port), db=int(db or 0), password=self.password) self.host = host self.port = int(port) elif isinstance(server, Redis): diff --git a/pyres/worker.py b/pyres/worker.py index c7f15f4..29e3236 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -19,7 +19,7 @@ class Worker(object): class and passes a comma-separated list of queues to listen on.:: >>> from pyres.worker import Worker - >>> Worker.run([queue1, queue2], server="localhost:6379") + >>> Worker.run([queue1, queue2], server="localhost:6379/0") """ From 2b78e447f51532f502303504f8b03428de434301 Mon Sep 17 00:00:00 2001 From: Igor Serko Date: Fri, 21 Sep 2012 12:31:58 +0100 Subject: [PATCH 052/102] the main module needs to get a named logger to prevent overtaking the root logger --- pyres/__init__.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index 3cd6f93..a2cf43a 100644 --- a/pyres/__init__.py +++ 
b/pyres/__init__.py @@ -8,6 +8,8 @@ import sys import logging +logger = logging.getLogger(__name__) + def special_log_file(filename): if filename in ("stderr", "stdout"): return True @@ -56,7 +58,8 @@ def get_logging_handler(filename, procname, namespace=None): def setup_logging(procname, log_level=logging.INFO, filename=None): if log_level == logging.NOTSET: return - logger = logging.getLogger() + main_package = __name__.split('.', 1)[0] if '.' in __name__ else __name__ + logger = logging.getLogger(main_package) logger.setLevel(log_level) handler = get_logging_handler(filename, procname) logger.addHandler(handler) @@ -213,18 +216,18 @@ def enqueue(self, klass, *args): class_name = '%s.%s' % (klass.__module__, klass.__name__) self.enqueue_from_string(class_name, queue, *args) else: - logging.warning("unable to enqueue job with class %s" % str(klass)) + logger.warning("unable to enqueue job with class %s" % str(klass)) def enqueue_from_string(self, klass_as_string, queue, *args, **kwargs): payload = {'class':klass_as_string, 'args':args, 'enqueue_timestamp': time.time()} if 'first_attempt' in kwargs: payload['first_attempt'] = kwargs['first_attempt'] self.push(queue, payload) - logging.info("enqueued '%s' job on queue %s" % (klass_as_string, queue)) + logger.info("enqueued '%s' job on queue %s" % (klass_as_string, queue)) if args: - logging.debug("job arguments: %s" % str(args)) + logger.debug("job arguments: %s" % str(args)) else: - logging.debug("no arguments passed in.") + logger.debug("no arguments passed in.") def queues(self): return self.redis.smembers("resque:queues") or [] @@ -283,10 +286,10 @@ def enqueue_at(self, datetime, klass, *args, **kwargs): self.enqueue_at_from_string(datetime, class_name, klass.queue, *args, **kwargs) def enqueue_at_from_string(self, datetime, klass_as_string, queue, *args, **kwargs): - logging.info("scheduled '%s' job on queue %s for execution at %s" % + logger.info("scheduled '%s' job on queue %s for execution at %s" % 
(klass_as_string, queue, datetime)) if args: - logging.debug("job arguments are: %s" % str(args)) + logger.debug("job arguments are: %s" % str(args)) payload = {'class': klass_as_string, 'queue': queue, 'args': args} if 'first_attempt' in kwargs: payload['first_attempt'] = kwargs['first_attempt'] From b073ac0be5bf04802ba9b314b05513280d3ce1a8 Mon Sep 17 00:00:00 2001 From: Wes Rogers Date: Fri, 12 Oct 2012 14:06:55 -0700 Subject: [PATCH 053/102] Add exception to metadata dictionary if job fails for use in after_perform --- pyres/job.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyres/job.py b/pyres/job.py index 684a6bd..5f88a0e 100644 --- a/pyres/job.py +++ b/pyres/job.py @@ -81,9 +81,10 @@ def perform(self): if before_perform: payload_class.before_perform(metadata) return payload_class.perform(*args) - except: + except Exception as e: check_after = False metadata["failed"] = True + metadata["exception"] = e if not self.retry(payload_class, args): metadata["retried"] = False raise From 626d017b7e3cc8e9e7fa5487157f3fc5359e04f4 Mon Sep 17 00:00:00 2001 From: Guilherme Souza Date: Mon, 5 Nov 2012 17:33:25 -0200 Subject: [PATCH 054/102] registers worker again after a redis flush --- pyres/worker.py | 6 ++++++ tests/test_worker.py | 5 +++++ 2 files changed, 11 insertions(+) diff --git a/pyres/worker.py b/pyres/worker.py index 29e3236..f2e1127 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -51,6 +51,9 @@ def register_worker(self): #self.resq._redis.add("worker:#{self}:started", Time.now.to_s) self.started = datetime.datetime.now() + def is_registered(self): + return self.resq.redis.sismember('resque:workers', str(self)) + def _set_started(self, dt): if dt: key = int(time.mktime(dt.timetuple())) @@ -138,6 +141,9 @@ def work(self, interval=5): logger.info('shutdown scheduled') break + if not self.is_registered(): + self.register_worker() + job = self.reserve(interval) if job: diff --git a/tests/test_worker.py b/tests/test_worker.py index 
7bcf981..8b7b8ad 100644 --- a/tests/test_worker.py +++ b/tests/test_worker.py @@ -38,6 +38,11 @@ def test_unregister(self): assert self.redis.sismember('resque:workers',name) worker.unregister_worker() assert name not in self.redis.smembers('resque:workers') + + def test_worker_is_registered(self): + worker = Worker(['basic']) + worker.register_worker() + assert worker.is_registered() def test_working_on(self): name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') From 7d6569daf6b0905ef13fa6e7f7a8c295396b75e5 Mon Sep 17 00:00:00 2001 From: Guilherme Souza Date: Mon, 5 Nov 2012 19:00:13 -0200 Subject: [PATCH 055/102] checking the registration after 3 iterations --- pyres/worker.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pyres/worker.py b/pyres/worker.py index f2e1127..c91d01f 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -136,13 +136,18 @@ def work(self, interval=5): logger.info("starting") self.startup() + check_worker_registration_wait = 3 + while True: if self._shutdown: logger.info('shutdown scheduled') break - if not self.is_registered(): - self.register_worker() + check_worker_registration_wait -= 1 + if not check_worker_registration_wait: + check_worker_registration_wait = 3 + if not self.is_registered(): + self.register_worker() job = self.reserve(interval) From c7af5db40fd19e1c6afa5df48ea9f51fc177db45 Mon Sep 17 00:00:00 2001 From: Guilherme Souza Date: Wed, 28 Nov 2012 18:00:55 -0200 Subject: [PATCH 056/102] register worker on every iteration --- pyres/worker.py | 11 +---------- tests/test_worker.py | 5 ----- 2 files changed, 1 insertion(+), 15 deletions(-) diff --git a/pyres/worker.py b/pyres/worker.py index c91d01f..4874db8 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -51,9 +51,6 @@ def register_worker(self): #self.resq._redis.add("worker:#{self}:started", Time.now.to_s) self.started = datetime.datetime.now() - def is_registered(self): - return self.resq.redis.sismember('resque:workers', str(self)) - 
def _set_started(self, dt): if dt: key = int(time.mktime(dt.timetuple())) @@ -136,18 +133,12 @@ def work(self, interval=5): logger.info("starting") self.startup() - check_worker_registration_wait = 3 - while True: if self._shutdown: logger.info('shutdown scheduled') break - check_worker_registration_wait -= 1 - if not check_worker_registration_wait: - check_worker_registration_wait = 3 - if not self.is_registered(): - self.register_worker() + self.register_worker() job = self.reserve(interval) diff --git a/tests/test_worker.py b/tests/test_worker.py index 8b7b8ad..d390625 100644 --- a/tests/test_worker.py +++ b/tests/test_worker.py @@ -39,11 +39,6 @@ def test_unregister(self): worker.unregister_worker() assert name not in self.redis.smembers('resque:workers') - def test_worker_is_registered(self): - worker = Worker(['basic']) - worker.register_worker() - assert worker.is_registered() - def test_working_on(self): name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') self.resq.enqueue(Basic,"test1") From 104df5b1eec4c80aeb89f474ab5a587fea8d81fe Mon Sep 17 00:00:00 2001 From: Matt George Date: Thu, 3 Jan 2013 19:11:24 -0600 Subject: [PATCH 057/102] updating copyright --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 22c6364..f9725b0 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 Matt George +Copyright (c) 2009-2013 Matt George Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the From 0912dcb1876254fc27e9177f1a4d1c96d49e0216 Mon Sep 17 00:00:00 2001 From: Leevar Williams Date: Sun, 20 Jan 2013 06:38:35 +0000 Subject: [PATCH 058/102] Modified worker_pids() to return the correct set of pids for pyres_worker processes and made it a class method. 
--- pyres/worker.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/pyres/worker.py b/pyres/worker.py index 4874db8..f8bd76b 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -75,7 +75,7 @@ def unregister_worker(self): def prune_dead_workers(self): all_workers = Worker.all(self.resq) - known_workers = self.worker_pids() + known_workers = Worker.worker_pids() for worker in all_workers: host, pid, queues = worker.id.split(':') if host != self.hostname: @@ -329,12 +329,16 @@ def state(self): return 'working' return 'idle' - def worker_pids(self): + @classmethod + def worker_pids(cls): """Returns an array of all pids (as strings) of the workers on this machine. Used when pruning dead workers.""" - return map(lambda l: l.strip().split(' ')[0], - commands.getoutput("ps -A -o pid,command | \ - grep pyres_worker").split("\n")) + cmd = "ps -A -o pid,command | grep pyres_worker | grep -v grep" + output = commands.getoutput(cmd) + if output: + return map(lambda l: l.strip().split(' ')[0], output.split("\n")) + else: + return [] @classmethod def run(cls, queues, server="localhost:6379", interval=None, timeout=None): From 548af974926e3f0b42f73d397092ed7ba4e8f237 Mon Sep 17 00:00:00 2001 From: Leevar Williams Date: Sun, 20 Jan 2013 06:39:10 +0000 Subject: [PATCH 059/102] Added test case for worker_pids(). 
--- tests/test_worker.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/tests/test_worker.py b/tests/test_worker.py index d390625..792ba6c 100644 --- a/tests/test_worker.py +++ b/tests/test_worker.py @@ -296,5 +296,36 @@ def test_retries_give_up_eventually(self): assert None == worker.process() assert worker.get_failed() == 1 + def test_worker_pids(self): + # spawn worker processes and get pids + pids = [] + pids.append(self.spawn_worker(['basic'])) + pids.append(self.spawn_worker(['basic'])) + time.sleep(1) + worker_pids = Worker.worker_pids() + + # send kill signal to workers and wait for them to exit + import signal + for pid in pids: + os.kill(pid, signal.SIGQUIT) + os.waitpid(pid, 0) + + # ensure worker_pids() returned the correct pids + assert len(worker_pids) == len(pids) + for pid in pids: + assert str(pid) in worker_pids + + # ensure there are no longer any workers running + worker_pids = Worker.worker_pids() + assert len(worker_pids) == 0 + + def spawn_worker(self, queues): + pid = os.fork() + if not pid: + Worker.run(queues, interval=1) + os._exit(0) + else: + return pid + def set_current_time(self, time): ResQ._current_time = staticmethod(lambda: time) From 062b548f7084e39426e8815fc40183fd9b8bb334 Mon Sep 17 00:00:00 2001 From: Leevar Williams Date: Sun, 20 Jan 2013 06:39:34 +0000 Subject: [PATCH 060/102] Fixed small bug in ResQ __str__() method. 
--- pyres/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index 69b1462..b64425c 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -260,7 +260,7 @@ def reserve(self, queues): return Job.reserve(queues, self) def __str__(self): - return "PyRes Client connected to %s" % self.redis.server + return "PyRes Client connected to %s" % self.dsn def workers(self): from pyres.worker import Worker From e119d7ed117267674eb3ead8d65caeb9b4d9c949 Mon Sep 17 00:00:00 2001 From: Leevar Williams Date: Tue, 22 Jan 2013 15:03:31 +0000 Subject: [PATCH 061/102] Fixed worker_pids test case. --- tests/test_worker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_worker.py b/tests/test_worker.py index 792ba6c..a6783a9 100644 --- a/tests/test_worker.py +++ b/tests/test_worker.py @@ -311,13 +311,13 @@ def test_worker_pids(self): os.waitpid(pid, 0) # ensure worker_pids() returned the correct pids - assert len(worker_pids) == len(pids) for pid in pids: assert str(pid) in worker_pids - # ensure there are no longer any workers running + # ensure the workers are no longer returned by worker_pids() worker_pids = Worker.worker_pids() - assert len(worker_pids) == 0 + for pid in pids: + assert str(pid) not in worker_pids def spawn_worker(self, queues): pid = os.fork() From 97cc4c7709242dd39f1f7bc17f6d62dbcc0117ee Mon Sep 17 00:00:00 2001 From: Matt George Date: Fri, 21 Jun 2013 06:31:38 -0500 Subject: [PATCH 062/102] 1.4.2 release --- HISTORY.md | 8 ++++++++ pyres/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 53e3ad1..7236716 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,3 +1,11 @@ +##1.4.2 (2013-06-21) +* __str__ returns correctly with dsn +* worker_pids returns correct set of workers +* workers are re-registered on every job +* add exception metadata for after_perform method +* logger no longer overrides 
root logger +* support for redis db in dsn + ##1.4.1 (2012-07-30) * fix for non existent system signal for linux * cleanup of setup.py and requirements diff --git a/pyres/__init__.py b/pyres/__init__.py index b64425c..b6c3ff1 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -1,4 +1,4 @@ -__version__ = '1.4.1' +__version__ = '1.4.2' from redis import Redis import pyres.json_parser as json diff --git a/setup.py b/setup.py index e534abe..e404e2f 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ from setuptools import setup, find_packages -version='1.4.1' +version='1.4.2' setup( name='pyres', version=version, From b440083a51f1dcacdcd25e74384d60966cbd2071 Mon Sep 17 00:00:00 2001 From: John Anderson Date: Sat, 13 Jul 2013 09:42:27 -0700 Subject: [PATCH 063/102] change prints --- tests/__init__.py | 4 ++-- tests/test_worker.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index a75685a..d75c866 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -14,7 +14,7 @@ class BasicMulti(object): queue = 'basic' @staticmethod def perform(name, age): - print 'name: %s, age: %s' % (name, age) + print('name: %s, age: %s' % (name, age)) class ReturnAllArgsJob(object): @@ -97,7 +97,7 @@ class LongObject(object): def perform(sleep_time): import time time.sleep(sleep_time) - print 'Done Sleeping' + print('Done Sleeping') def test_str_to_class(): ret = str_to_class('tests.Basic') diff --git a/tests/test_worker.py b/tests/test_worker.py index a6783a9..184cb93 100644 --- a/tests/test_worker.py +++ b/tests/test_worker.py @@ -124,7 +124,7 @@ def test_get_job(self): assert worker.processing() == ResQ.decode(self.redis.get('resque:worker:%s' % name)) worker.done_working(job) w2 = Worker(['basic']) - print w2.job() + print(w2.job()) assert w2.job() == {} def test_working(self): From 30dd8570476091597cfa392e837703246a0f3fc1 Mon Sep 17 00:00:00 2001 From: John Anderson Date: Sat, 13 Jul 2013 09:49:59 -0700 Subject: 
[PATCH 064/102] started the porting by adding a compat.py --- .gitignore | 3 ++- pyres/.compat.py.swo | Bin 0 -> 4096 bytes pyres/__init__.py | 7 ++++--- pyres/compat.py | 27 +++++++++++++++++++++++++++ pyres/json_parser.py | 3 ++- pyres/worker.py | 2 +- 6 files changed, 36 insertions(+), 6 deletions(-) create mode 100644 pyres/.compat.py.swo create mode 100644 pyres/compat.py diff --git a/.gitignore b/.gitignore index ee342e9..23a2c76 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,5 @@ build/ .coverage *.egg-info logs/ -dist/ \ No newline at end of file +dist/ +*.swp diff --git a/pyres/.compat.py.swo b/pyres/.compat.py.swo new file mode 100644 index 0000000000000000000000000000000000000000..3eb14800a14458fc351e9627f72e593374d5e7d9 GIT binary patch literal 4096 zcmYc?2=nw+FxN9?00IF9hVqt&fzDM*406H@48{3*C8^mU3H%@_HLNd_Dw4S~@R7!85Z5Eu=C(GVC7fzc2c4S^vY0sv%q BC2;@% literal 0 HcmV?d00001 diff --git a/pyres/__init__.py b/pyres/__init__.py index b6c3ff1..a2b353e 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -1,6 +1,7 @@ __version__ = '1.4.2' from redis import Redis +from pyres.compat import string_types import pyres.json_parser as json import os @@ -152,7 +153,7 @@ def push(self, queue, item): self.redis.rpush("resque:queue:%s" % queue, ResQ.encode(item)) def pop(self, queues, timeout=10): - if isinstance(queues, basestring): + if isinstance(queues, string_types): queues = [queues] ret = self.redis.blpop(["resque:queue:%s" % q for q in queues], timeout=timeout) @@ -186,7 +187,7 @@ def _get_redis(self): return self._redis def _set_redis(self, server): - if isinstance(server, basestring): + if isinstance(server, string_types): self.dsn = server address, _, db = server.partition('/') host, port = address.split(':') @@ -346,7 +347,7 @@ def encode(cls, item): @classmethod def decode(cls, item): - if isinstance(item, basestring): + if isinstance(item, string_types): ret = json.loads(item) return ret return None diff --git a/pyres/compat.py b/pyres/compat.py new 
file mode 100644 index 0000000..430f72d --- /dev/null +++ b/pyres/compat.py @@ -0,0 +1,27 @@ +import sys +import types + +try: + import cPickle as pickle +except ImportError: # pragma: no cover + import pickle + +# True if we are running on Python 3. +PY3 = sys.version_info[0] == 3 + +if PY3: # pragma: no cover + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + long = int +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + long = long + + diff --git a/pyres/json_parser.py b/pyres/json_parser.py index be80fb6..d425683 100644 --- a/pyres/json_parser.py +++ b/pyres/json_parser.py @@ -1,4 +1,5 @@ from datetime import datetime +from pyres.compat import string_types try: #import simplejson as json @@ -24,7 +25,7 @@ def decode(self, json_string): return self.convert(decoded) def convert(self, value): - if isinstance(value, basestring) and value.startswith(DATE_PREFIX): + if isinstance(value, string_types) and value.startswith(DATE_PREFIX): try: return datetime.strptime(value[len(DATE_PREFIX):], DATE_FORMAT) except ValueError: diff --git a/pyres/worker.py b/pyres/worker.py index f8bd76b..972a7fe 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -257,7 +257,7 @@ def process(self, job=None): except Exception: job_failed = True self._handle_job_exception(job) - except SystemExit, e: + except SystemExit as e: if e.code != 0: job_failed = True self._handle_job_exception(job) From b33e01adeedc2dd3c5e12c4412598682e4f945b5 Mon Sep 17 00:00:00 2001 From: John Anderson Date: Sat, 13 Jul 2013 09:58:23 -0700 Subject: [PATCH 065/102] updated json_parser --- pyres/horde.py | 2 +- pyres/json_parser.py | 2 +- pyres/worker.py | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pyres/horde.py b/pyres/horde.py index 0b47047..70990fa 100644 --- a/pyres/horde.py +++ b/pyres/horde.py @@ -85,7 +85,7 @@ def 
process(self, job): try: self.working_on(job) job.perform() - except Exception, e: + except Exception as e: exceptionType, exceptionValue, exceptionTraceback = sys.exc_info() self.logger.error("%s failed: %s" % (job, e)) job.fail(exceptionTraceback) diff --git a/pyres/json_parser.py b/pyres/json_parser.py index d425683..a8d18e7 100644 --- a/pyres/json_parser.py +++ b/pyres/json_parser.py @@ -31,7 +31,7 @@ def convert(self, value): except ValueError: return value elif isinstance(value, dict): - for k, v in value.iteritems(): + for k, v in value.items(): new = self.convert(v) if new != v: value[k] = new diff --git a/pyres/worker.py b/pyres/worker.py index 972a7fe..3eb4d3c 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -2,8 +2,8 @@ import signal import datetime, time import os, sys -import json_parser as json -import commands +from pyres import json_parser as json +import subprocess import random from pyres.exceptions import NoQueueError, JobError, TimeoutError, CrashError @@ -334,7 +334,7 @@ def worker_pids(cls): """Returns an array of all pids (as strings) of the workers on this machine. 
Used when pruning dead workers.""" cmd = "ps -A -o pid,command | grep pyres_worker | grep -v grep" - output = commands.getoutput(cmd) + output = subprocess.getoutput(cmd) if output: return map(lambda l: l.strip().split(' ')[0], output.split("\n")) else: From a4fba8aea50a8168f84e6497f8266d6980d672c2 Mon Sep 17 00:00:00 2001 From: John Anderson Date: Sat, 13 Jul 2013 10:22:47 -0700 Subject: [PATCH 066/102] updated coommands to by py2/py3 compat --- pyres/compat.py | 2 ++ pyres/worker.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pyres/compat.py b/pyres/compat.py index 430f72d..4805b21 100644 --- a/pyres/compat.py +++ b/pyres/compat.py @@ -16,6 +16,7 @@ text_type = str binary_type = bytes long = int + import subprocess as commands else: string_types = basestring, integer_types = (int, long) @@ -23,5 +24,6 @@ text_type = unicode binary_type = str long = long + import commands diff --git a/pyres/worker.py b/pyres/worker.py index 3eb4d3c..9befb1c 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -3,7 +3,7 @@ import datetime, time import os, sys from pyres import json_parser as json -import subprocess +from pyres.compat import commands import random from pyres.exceptions import NoQueueError, JobError, TimeoutError, CrashError @@ -334,7 +334,7 @@ def worker_pids(cls): """Returns an array of all pids (as strings) of the workers on this machine. 
Used when pruning dead workers.""" cmd = "ps -A -o pid,command | grep pyres_worker | grep -v grep" - output = subprocess.getoutput(cmd) + output = commands.getoutput(cmd) if output: return map(lambda l: l.strip().split(' ')[0], output.split("\n")) else: From a4a2c9d259459270106d52f637ed50d40073b40e Mon Sep 17 00:00:00 2001 From: John Anderson Date: Sat, 13 Jul 2013 10:22:59 -0700 Subject: [PATCH 067/102] added tox --- tox.ini | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 tox.ini diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..99608d5 --- /dev/null +++ b/tox.ini @@ -0,0 +1,8 @@ +[tox] +envlist = py27, py33 + +[testenv] +commands = nosetests +deps = + nose + nosexcover From 4e5de75260a60c02229e5b0d4b685fc943420edd Mon Sep 17 00:00:00 2001 From: John Anderson Date: Sat, 13 Jul 2013 10:29:03 -0700 Subject: [PATCH 068/102] updated dependencies to work well in py2 and 3, use bytestring in test --- requirements.txt | 6 +++--- tests/test_stats.py | 14 +++++++------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/requirements.txt b/requirements.txt index 05131d7..96046b5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ -simplejson>=2.0.9 -redis>=2.4.12 -setproctitle>=1.0 +simplejson>3.0 +redis>2.4.12 +setproctitle>1.0 diff --git a/tests/test_stats.py b/tests/test_stats.py index c49ed1a..b6cf291 100644 --- a/tests/test_stats.py +++ b/tests/test_stats.py @@ -4,22 +4,22 @@ class StatTests(PyResTests): def test_incr(self): stat_obj = Stat('test_stat', self.resq) stat_obj.incr() - assert self.redis.get('resque:stat:test_stat') == str(1) + assert self.redis.get('resque:stat:test_stat') == b'1' stat_obj.incr() - assert self.redis.get('resque:stat:test_stat') == str(2) + assert self.redis.get('resque:stat:test_stat') == b'2' stat_obj.incr(2) - assert self.redis.get('resque:stat:test_stat') == str(4) + assert self.redis.get('resque:stat:test_stat') == b'4' def test_decr(self): stat_obj = Stat('test_stat', 
self.resq) stat_obj.incr() stat_obj.incr() - assert self.redis.get('resque:stat:test_stat') == str(2) + assert self.redis.get('resque:stat:test_stat') == b'2' stat_obj.decr() - assert self.redis.get('resque:stat:test_stat') == str(1) + assert self.redis.get('resque:stat:test_stat') == b'1' stat_obj.incr() stat_obj.decr(2) - assert self.redis.get('resque:stat:test_stat') == str(0) + assert self.redis.get('resque:stat:test_stat') == b'0' def test_get(self): stat_obj = Stat('test_stat', self.resq) @@ -33,4 +33,4 @@ def test_clear(self): stat_obj.incr() assert self.redis.exists('resque:stat:test_stat') stat_obj.clear() - assert not self.redis.exists('resque:stat:test_stat') \ No newline at end of file + assert not self.redis.exists('resque:stat:test_stat') From 6d9fcd1ce3b365e1b3b44df2b2f6566d3a099477 Mon Sep 17 00:00:00 2001 From: John Anderson Date: Sat, 13 Jul 2013 10:31:02 -0700 Subject: [PATCH 069/102] impore properly --- pyres/failure/redis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyres/failure/redis.py b/pyres/failure/redis.py index 715f9de..5fe71ee 100644 --- a/pyres/failure/redis.py +++ b/pyres/failure/redis.py @@ -1,7 +1,7 @@ import datetime, time from base64 import b64encode -from base import BaseBackend +from .base import BaseBackend from pyres import ResQ class RedisBackend(BaseBackend): From b2374c430cbb7232f0b5d7677e960fb52a4240ed Mon Sep 17 00:00:00 2001 From: John Anderson Date: Sat, 13 Jul 2013 10:33:48 -0700 Subject: [PATCH 070/102] update gitignore --- .gitignore | 2 ++ pyres/.compat.py.swo | Bin 4096 -> 0 bytes 2 files changed, 2 insertions(+) delete mode 100644 pyres/.compat.py.swo diff --git a/.gitignore b/.gitignore index 23a2c76..310a9ae 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,5 @@ build/ logs/ dist/ *.swp +*.swo +.tox/ diff --git a/pyres/.compat.py.swo b/pyres/.compat.py.swo deleted file mode 100644 index 3eb14800a14458fc351e9627f72e593374d5e7d9..0000000000000000000000000000000000000000 GIT binary patch 
literal 0 HcmV?d00001 literal 4096 zcmYc?2=nw+FxN9?00IF9hVqt&fzDM*406H@48{3*C8^mU3H%@_HLNd_Dw4S~@R7!85Z5Eu=C(GVC7fzc2c4S^vY0sv%q BC2;@% From f27561bd4eb9efa42c21a1155461b3ff951000d6 Mon Sep 17 00:00:00 2001 From: Wayne Witzel III Date: Sat, 13 Jul 2013 17:39:44 +0000 Subject: [PATCH 071/102] Converting basestring to compat.string_types --- pyres/horde.py | 95 +++++++++++++++++++++++----------------------- pyres/job.py | 8 ++-- pyres/scheduler.py | 3 +- pyres/worker.py | 8 ++-- 4 files changed, 58 insertions(+), 56 deletions(-) diff --git a/pyres/horde.py b/pyres/horde.py index 70990fa..43bd2d6 100644 --- a/pyres/horde.py +++ b/pyres/horde.py @@ -12,6 +12,7 @@ from pyres.exceptions import NoQueueError from pyres.utils import OrderedDict from pyres.job import Job +from pyres.compat import string_types import pyres.json_parser as json try: from setproctitle import setproctitle @@ -20,7 +21,7 @@ def setproctitle(name): pass def setup_logging(procname, namespace='', log_level=logging.INFO, log_file=None): - + logger = multiprocessing.get_logger() #logger = multiprocessing.log_to_stderr() logger.setLevel(log_level) @@ -31,54 +32,54 @@ def setup_logging(procname, namespace='', log_level=logging.INFO, log_file=None) class Minion(multiprocessing.Process): def __init__(self, queues, server, password, log_level=logging.INFO, log_path=None): multiprocessing.Process.__init__(self, name='Minion') - + #format = '%(asctime)s %(levelname)s %(filename)s-%(lineno)d: %(message)s' #logHandler = logging.StreamHandler() #logHandler.setFormatter(logging.Formatter(format)) #self.logger = multiprocessing.get_logger() #self.logger.addHandler(logHandler) #self.logger.setLevel(logging.DEBUG) - + self.queues = queues self._shutdown = False self.hostname = os.uname()[1] self.server = server self.password = password - + self.log_level = log_level self.log_path = log_path self.log_file = None - + def prune_dead_workers(self): pass - + def schedule_shutdown(self, signum, frame): self._shutdown = 
True - + def register_signal_handlers(self): signal.signal(signal.SIGTERM, self.schedule_shutdown) signal.signal(signal.SIGINT, self.schedule_shutdown) signal.signal(signal.SIGQUIT, self.schedule_shutdown) - + def register_minion(self): self.resq.redis.sadd('resque:minions',str(self)) self.started = datetime.datetime.now() - + def startup(self): self.register_signal_handlers() self.prune_dead_workers() self.register_minion() - + def __str__(self): return '%s:%s:%s' % (self.hostname, self.pid, ','.join(self.queues)) - + def reserve(self): self.logger.debug('checking queues: %s' % self.queues) job = Job.reserve(self.queues, self.resq, self.__str__()) if job: self.logger.info('Found job on %s' % job._queue) return job - + def process(self, job): if not job: return @@ -95,7 +96,7 @@ def process(self, job): self.logger.info('completed job: %s' % job) finally: self.done_working() - + def working_on(self, job): setproctitle('pyres_minion:%s: working on job: %s' % (os.getppid(), job._payload)) self.logger.debug('marking as working on') @@ -108,25 +109,25 @@ def working_on(self, job): self.resq.redis["resque:minion:%s" % str(self)] = data self.logger.debug("minion:%s" % str(self)) #self.logger.debug(self.resq.redis["resque:minion:%s" % str(self)]) - + def failed(self): Stat("failed", self.resq).incr() - + def processed(self): total_processed = Stat("processed", self.resq) total_processed.incr() - + def done_working(self): self.logger.debug('done working') self.processed() self.resq.redis.delete("resque:minion:%s" % str(self)) - + def unregister_minion(self): self.resq.redis.srem('resque:minions',str(self)) self.started = None - + def work(self, interval=5): - + self.startup() while True: setproctitle('pyres_minion:%s: waiting for job on: %s' % (os.getppid(),self.queues)) @@ -140,11 +141,11 @@ def work(self, interval=5): else: time.sleep(interval) self.unregister_minion() - + def clear_logger(self): for handler in self.logger.handlers: self.logger.removeHandler(handler) - + 
def run(self): setproctitle('pyres_minion:%s: Starting' % (os.getppid(),)) if self.log_path: @@ -155,19 +156,19 @@ def run(self): namespace = 'minion:%s' % self.pid self.logger = setup_logging('minion', namespace, self.log_level, self.log_file) #self.clear_logger() - if isinstance(self.server,basestring): + if isinstance(self.server,string_types): self.resq = ResQ(server=self.server, password=self.password) elif isinstance(self.server, ResQ): self.resq = self.server else: raise Exception("Bad server argument") - - + + self.work() #while True: # job = self.q.get() # print 'pid: %s is running %s ' % (self.pid,job) - + class Khan(object): _command_map = { @@ -189,28 +190,28 @@ def __init__(self, pool_size=5, queues=[], server='localhost:6379', password=Non self.password = password self.logging_level = logging_level self.log_file = log_file - + #self._workers = list() - + def setup_resq(self): if hasattr(self,'logger'): self.logger.info('Connecting to redis server - %s' % self.server) - if isinstance(self.server,basestring): + if isinstance(self.server,string_types): self.resq = ResQ(server=self.server, password=self.password) elif isinstance(self.server, ResQ): self.resq = self.server else: raise Exception("Bad server argument") - + def validate_queues(self): "Checks if a worker is given atleast one queue to work on." 
if not self.queues: raise NoQueueError("Please give each worker at least one queue.") - + def startup(self): self.register_signal_handlers() - - + + def register_signal_handlers(self): signal.signal(signal.SIGTERM, self.schedule_shutdown) signal.signal(signal.SIGINT, self.schedule_shutdown) @@ -252,23 +253,23 @@ def current_state(self): def _schedule_shutdown(self): self.schedule_shutdown(None, None) - + def schedule_shutdown(self, signum, frame): self.logger.info('Khan Shutdown scheduled') self._shutdown = True - + def kill_child(self, signum, frame): self._remove_minion() - + def add_child(self, signum, frame): self.add_minion() - + def register_khan(self): if not hasattr(self, 'resq'): self.setup_resq() self.resq.redis.sadd('resque:khans',str(self)) self.started = datetime.datetime.now() - + def _check_commands(self): if not self._shutdown: self.logger.debug('Checking commands') @@ -277,7 +278,7 @@ def _check_commands(self): if command: self.process_command(command) self._check_commands() - + def process_command(self, command): self.logger.info('Processing Command') #available commands, shutdown, add 1, remove 1 @@ -286,13 +287,13 @@ def process_command(self, command): fn = getattr(self, command_item) if fn: fn() - + def add_minion(self): self._add_minion() self.resq.redis.srem('resque:khans',str(self)) self.pool_size += 1 self.resq.redis.sadd('resque:khans',str(self)) - + def _add_minion(self): if hasattr(self,'logger'): self.logger.info('Adding minion') @@ -309,7 +310,7 @@ def _add_minion(self): if hasattr(self,'logger'): self.logger.info('minion added at: %s' % m.pid) return m - + def _shutdown_minions(self): """ send the SIGNINT signal to each worker in the pool. 
@@ -318,7 +319,7 @@ def _shutdown_minions(self): for minion in self._workers.values(): minion.terminate() minion.join() - + def _remove_minion(self, pid=None): #if pid: # m = self._workers.pop(pid) @@ -328,20 +329,20 @@ def _remove_minion(self, pid=None): self.pool_size -= 1 self.resq.redis.sadd('resque:khans',str(self)) return m - + def unregister_khan(self): if hasattr(self,'logger'): self.logger.debug('unregistering khan') self.resq.redis.srem('resque:khans',str(self)) self.started = None - + def setup_minions(self): for i in range(self.pool_size): self._add_minion() def _setup_logging(self): self.logger = setup_logging('khan', 'khan', self.logging_level, self.log_file) - + def work(self, interval=2): setproctitle('pyres_manager: Starting') self.startup() @@ -363,11 +364,11 @@ def work(self, interval=2): else: time.sleep(interval) self.unregister_khan() - + def __str__(self): hostname = os.uname()[1] return '%s:%s:%s' % (hostname, self.pid, self.pool_size) - + @classmethod def run(cls, pool_size=5, queues=[], server='localhost:6379', logging_level=logging.INFO, log_file=None): worker = cls(pool_size=pool_size, queues=queues, server=server, logging_level=logging_level, log_file=log_file) diff --git a/pyres/job.py b/pyres/job.py index 5f88a0e..8358494 100644 --- a/pyres/job.py +++ b/pyres/job.py @@ -4,7 +4,7 @@ from pyres import ResQ, safe_str_to_class from pyres import failure from pyres.failure.redis import RedisBackend - +from pyres.compat import string_types class Job(object): """Every job on the ResQ is an instance of the *Job* class. @@ -26,7 +26,7 @@ class Job(object): """ safe_str_to_class = staticmethod(safe_str_to_class) - + def __init__(self, queue, payload, resq, worker=None): self._queue = queue self._payload = payload @@ -111,7 +111,7 @@ def retry(self, payload_class, args): """This method provides a way to retry a job after a failure. 
If the jobclass defined by the payload containes a ``retry_every`` attribute then pyres will attempt to retry the job until successful or until timeout defined by ``retry_timeout`` on the payload class. - + """ retry_every = getattr(payload_class, 'retry_every', None) retry_timeout = getattr(payload_class, 'retry_timeout', 0) @@ -133,7 +133,7 @@ def reserve(cls, queues, res, worker=None, timeout=10): that other workers will not pick it up. """ - if isinstance(queues, basestring): + if isinstance(queues, string_types): queues = [queues] queue, payload = res.pop(queues, timeout=timeout) if payload: diff --git a/pyres/scheduler.py b/pyres/scheduler.py index af5a4d6..11e4f58 100644 --- a/pyres/scheduler.py +++ b/pyres/scheduler.py @@ -3,6 +3,7 @@ import logging from pyres import ResQ, __version__ +from pyres.compat import string_types logger = logging.getLogger(__name__) @@ -14,7 +15,7 @@ def __init__(self, server="localhost:6379", password=None): >>> scheduler = Scheduler('localhost:6379') """ self._shutdown = False - if isinstance(server, basestring): + if isinstance(server, string_types): self.resq = ResQ(server=server, password=password) elif isinstance(server, ResQ): self.resq = server diff --git a/pyres/worker.py b/pyres/worker.py index 9befb1c..908def8 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -9,7 +9,7 @@ from pyres.exceptions import NoQueueError, JobError, TimeoutError, CrashError from pyres.job import Job from pyres import ResQ, Stat, __version__ - +from pyres.compat import string_types logger = logging.getLogger(__name__) @@ -34,7 +34,7 @@ def __init__(self, queues=(), server="localhost:6379", password=None, timeout=No self.hostname = os.uname()[1] self.timeout = timeout - if isinstance(server, basestring): + if isinstance(server, string_types): self.resq = ResQ(server=server, password=password) elif isinstance(server, ResQ): self.resq = server @@ -350,7 +350,7 @@ def run(cls, queues, server="localhost:6379", interval=None, timeout=None): 
@classmethod def all(cls, host="localhost:6379"): - if isinstance(host,basestring): + if isinstance(host,string_types): resq = ResQ(host) elif isinstance(host, ResQ): resq = host @@ -359,7 +359,7 @@ def all(cls, host="localhost:6379"): @classmethod def working(cls, host): - if isinstance(host, basestring): + if isinstance(host, string_types): resq = ResQ(host) elif isinstance(host, ResQ): resq = host From 52a53eb0c9784321deb4bb543baef5cdfb6829aa Mon Sep 17 00:00:00 2001 From: John Anderson Date: Sat, 13 Jul 2013 10:52:54 -0700 Subject: [PATCH 072/102] switch to pytest --- tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 99608d5..1657f6a 100644 --- a/tox.ini +++ b/tox.ini @@ -2,7 +2,8 @@ envlist = py27, py33 [testenv] -commands = nosetests +commands = py.test deps = + pytest nose nosexcover From 1df8942a310c2b3bac31bd3d194c97a44baec46f Mon Sep 17 00:00:00 2001 From: John Anderson Date: Sat, 13 Jul 2013 11:17:12 -0700 Subject: [PATCH 073/102] decode pop (we want strings!!) 
--- pyres/__init__.py | 2 +- pyres/compat.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index a2b353e..71784c6 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -159,7 +159,7 @@ def pop(self, queues, timeout=10): timeout=timeout) if ret: key, ret = ret - return key[13:], ResQ.decode(ret) # trim "resque:queue:" + return key[13:].decode(), ResQ.decode(ret.decode()) # trim "resque:queue:" else: return None, None diff --git a/pyres/compat.py b/pyres/compat.py index 4805b21..c39fd3f 100644 --- a/pyres/compat.py +++ b/pyres/compat.py @@ -17,6 +17,7 @@ binary_type = bytes long = int import subprocess as commands + else: string_types = basestring, integer_types = (int, long) From 2961b5eb113c9e0877b8b48a606bade37800b2b5 Mon Sep 17 00:00:00 2001 From: John Anderson Date: Sat, 13 Jul 2013 11:46:39 -0700 Subject: [PATCH 074/102] don't blow up in py3 --- pyres/failure/base.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pyres/failure/base.py b/pyres/failure/base.py index c902299..330fbe4 100644 --- a/pyres/failure/base.py +++ b/pyres/failure/base.py @@ -17,10 +17,14 @@ class BaseBackend(object): """ def __init__(self, exp, queue, payload, worker=None): - excc, _, tb = sys.exc_info() + excc = sys.exc_info()[0] self._exception = excc - self._traceback = traceback.format_exc() + try: + self._traceback = traceback.format_exc() + except AttributeError: + self._traceback = None + self._worker = worker self._queue = queue self._payload = payload From 607e08da090be42757cbc4f3c83c791c9738f1c6 Mon Sep 17 00:00:00 2001 From: Wayne Witzel III Date: Sat, 13 Jul 2013 18:51:12 +0000 Subject: [PATCH 075/102] Fixing some more worker tests --- pyres/__init__.py | 9 ++++----- pyres/worker.py | 2 +- tests/test_worker.py | 32 ++++++++++++++++---------------- 3 files changed, 21 insertions(+), 22 deletions(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index 71784c6..d04a8ee 100644 --- 
a/pyres/__init__.py +++ b/pyres/__init__.py @@ -232,7 +232,10 @@ def enqueue_from_string(self, klass_as_string, queue, *args, **kwargs): logger.debug("no arguments passed in.") def queues(self): - return self.redis.smembers("resque:queues") or [] + return [sm.decode() for sm in self.redis.smembers("resque:queues")] or [] + + def workers(self): + return [w.decode() for w in self.redis.smembers("resque:workers")] or [] def info(self): """Returns a dictionary of the current status of the pending jobs, @@ -263,10 +266,6 @@ def reserve(self, queues): def __str__(self): return "PyRes Client connected to %s" % self.dsn - def workers(self): - from pyres.worker import Worker - return Worker.all(self) - def working(self): from pyres.worker import Worker return Worker.working(self) diff --git a/pyres/worker.py b/pyres/worker.py index 908def8..952df6a 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -355,7 +355,7 @@ def all(cls, host="localhost:6379"): elif isinstance(host, ResQ): resq = host - return [Worker.find(w,resq) for w in resq.redis.smembers('resque:workers') or []] + return [Worker.find(w,resq) for w in resq.workers() or []] @classmethod def working(cls, host): diff --git a/tests/test_worker.py b/tests/test_worker.py index 184cb93..76455db 100644 --- a/tests/test_worker.py +++ b/tests/test_worker.py @@ -13,7 +13,7 @@ def test_worker_init(self): from pyres.exceptions import NoQueueError self.assertRaises(NoQueueError, Worker,[]) self.assertRaises(Exception, Worker,['test'],TestProcess()) - + def test_startup(self): worker = Worker(['basic']) worker.startup() @@ -24,13 +24,13 @@ def test_startup(self): assert signal.getsignal(signal.SIGINT) == worker.shutdown_all assert signal.getsignal(signal.SIGQUIT) == worker.schedule_shutdown assert signal.getsignal(signal.SIGUSR1) == worker.kill_child - + def test_register(self): worker = Worker(['basic']) worker.register_worker() name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') assert 
self.redis.sismember('resque:workers',name) - + def test_unregister(self): worker = Worker(['basic']) worker.register_worker() @@ -46,7 +46,7 @@ def test_working_on(self): worker = Worker(['basic']) worker.working_on(job) assert self.redis.exists("resque:worker:%s" % name) - + def test_processed(self): name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') worker = Worker(['basic']) @@ -60,7 +60,7 @@ def test_processed(self): assert self.redis.get("resque:stat:processed") == str(2) assert self.redis.get("resque:stat:processed:%s" % name) == str(2) assert worker.get_processed() == 2 - + def test_failed(self): name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') worker = Worker(['basic']) @@ -74,7 +74,7 @@ def test_failed(self): assert self.redis.get("resque:stat:failed") == str(2) assert self.redis.get("resque:stat:failed:%s" % name) == str(2) assert worker.get_failed() == 2 - + def test_process(self): name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') self.resq.enqueue(Basic,"test1") @@ -89,8 +89,8 @@ def test_process(self): assert not self.redis.get('resque:worker:%s' % worker) assert not self.redis.get("resque:stat:failed") assert not self.redis.get("resque:stat:failed:%s" % name) - - + + def test_signals(self): worker = Worker(['basic']) worker.startup() @@ -104,7 +104,7 @@ def test_signals(self): #worker.work() #assert worker.child assert not worker.kill_child(frame, signal.SIGUSR1) - + def test_job_failure(self): self.resq.enqueue(ErrorObject) worker = Worker(['basic']) @@ -113,7 +113,7 @@ def test_job_failure(self): assert not self.redis.get('resque:worker:%s' % worker) assert self.redis.get("resque:stat:failed") == str(1) assert self.redis.get("resque:stat:failed:%s" % name) == str(1) - + def test_get_job(self): worker = Worker(['basic']) self.resq.enqueue(Basic,"test1") @@ -126,7 +126,7 @@ def test_get_job(self): w2 = Worker(['basic']) print(w2.job()) assert w2.job() == {} - + def test_working(self): worker = Worker(['basic']) 
self.resq.enqueue_from_string('tests.Basic','basic','test1') @@ -138,18 +138,18 @@ def test_working(self): assert len(workers) == 1 assert str(worker) == str(workers[0]) assert worker != workers[0] - + def test_started(self): import datetime worker = Worker(['basic']) dt = datetime.datetime.now() worker.started = dt name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') - assert self.redis.get('resque:worker:%s:started' % name) == str(int(time.mktime(dt.timetuple()))) - assert worker.started == str(int(time.mktime(dt.timetuple()))) + assert self.redis.get('resque:worker:%s:started' % name).decode() == str(int(time.mktime(dt.timetuple()))) + assert worker.started.decode() == str(int(time.mktime(dt.timetuple()))) worker.started = None assert not self.redis.exists('resque:worker:%s:started' % name) - + def test_state(self): worker = Worker(['basic']) assert worker.state() == 'idle' @@ -160,7 +160,7 @@ def test_state(self): assert worker.state() == 'working' worker.done_working(job) assert worker.state() == 'idle' - + def test_prune_dead_workers(self): worker = Worker(['basic']) # we haven't registered this worker, so the assertion below holds assert self.redis.scard('resque:workers') == 0 From b624c7971fcd38ad7240ef496395b4149e93f8df Mon Sep 17 00:00:00 2001 From: John Anderson Date: Sat, 13 Jul 2013 11:55:44 -0700 Subject: [PATCH 076/102] we need to pass strings to decode but bytes to b64 --- pyres/failure/redis.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyres/failure/redis.py b/pyres/failure/redis.py index 5fe71ee..0b44739 100644 --- a/pyres/failure/redis.py +++ b/pyres/failure/redis.py @@ -34,7 +34,8 @@ def all(cls, resq, start=0, count=1): ret_list = [] for i in items: - failure = ResQ.decode(i) + converted = i.decode() + failure = ResQ.decode(converted) failure['redis_value'] = b64encode(i) ret_list.append(failure) return ret_list From 48a52d83c3f60cf3bec3e42ae84701966043e29b Mon Sep 17 00:00:00 2001 From: Wayne Witzel III Date: Sat, 
13 Jul 2013 19:00:36 +0000 Subject: [PATCH 077/102] More test fixing --- pyres/__init__.py | 2 +- tests/test_resq.py | 26 +++++++++++++------------- tests/test_worker.py | 20 ++++++++++---------- 3 files changed, 24 insertions(+), 24 deletions(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index d04a8ee..bd589f0 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -256,7 +256,7 @@ def info(self): } def keys(self): - return [key.replace('resque:','') + return [key.decode().replace('resque:','') for key in self.redis.keys('resque:*')] def reserve(self, queues): diff --git a/tests/test_resq.py b/tests/test_resq.py index 7412899..1559202 100644 --- a/tests/test_resq.py +++ b/tests/test_resq.py @@ -10,14 +10,14 @@ def test_enqueue(self): ResQ._enqueue(Basic, "test3") assert self.redis.llen("resque:queue:basic") == 3 assert self.redis.sismember('resque:queues','basic') - + def test_push(self): self.resq.push('pushq','content-newqueue') self.resq.push('pushq','content2-newqueue') assert self.redis.llen('resque:queue:pushq') == 2 - assert self.redis.lindex('resque:queue:pushq', 0) == ResQ.encode('content-newqueue') - assert self.redis.lindex('resque:queue:pushq', 1) == ResQ.encode('content2-newqueue') - + assert self.redis.lindex('resque:queue:pushq', 0).decode() == ResQ.encode('content-newqueue') + assert self.redis.lindex('resque:queue:pushq', 1).decode() == ResQ.encode('content2-newqueue') + def test_pop(self): self.resq.push('pushq','content-newqueue') self.resq.push('pushq','content2-newqueue') @@ -43,25 +43,25 @@ def test_pop_two_queues(self): assert self.redis.llen('resque:queue:pushq1') == 0 assert self.redis.llen('resque:queue:pushq2') == 0 assert self.resq.pop(['pushq1', 'pushq2'], timeout=1) == (None, None) - + def test_peek(self): self.resq.enqueue(Basic,"test1") self.resq.enqueue(Basic,"test2") assert len(self.resq.peek('basic',0,20)) == 2 - + def test_size(self): self.resq.enqueue(Basic,"test1") self.resq.enqueue(Basic,"test2") assert 
self.resq.size('basic') == 2 assert self.resq.size('noq') == 0 - + def test_redis_property(self): from redis import Redis rq = ResQ(server="localhost:6379") red = Redis() #rq2 = ResQ(server=red) self.assertRaises(Exception, rq.redis,[Basic]) - + def test_info(self): self.resq.enqueue(Basic,"test1") self.resq.enqueue(TestProcess) @@ -73,14 +73,14 @@ def test_info(self): worker.register_worker() info = self.resq.info() assert info['workers'] == 1 - + def test_workers(self): worker = Worker(['basic']) worker.register_worker() name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') assert len(self.resq.workers()) == 1 #assert Worker.find(name, self.resq) in self.resq.workers() - + def test_enqueue_from_string(self): self.resq.enqueue_from_string('tests.Basic','basic','test1') name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') @@ -91,7 +91,7 @@ def test_enqueue_from_string(self): assert not self.redis.get('resque:worker:%s' % worker) assert not self.redis.get("resque:stat:failed") assert not self.redis.get("resque:stat:failed:%s" % name) - + def test_remove_queue(self): self.resq.enqueue_from_string('tests.Basic','basic','test1') assert 'basic' in self.resq._watched_queues @@ -101,12 +101,12 @@ def test_remove_queue(self): assert 'basic' not in self.resq._watched_queues assert not self.redis.sismember('resque:queues','basic') assert not self.redis.exists('resque:queue:basic') - + def test_keys(self): self.resq.enqueue_from_string('tests.Basic','basic','test1') assert 'queue:basic' in self.resq.keys() assert 'queues' in self.resq.keys() - + def test_queues(self): assert self.resq.queues() == [] self.resq.enqueue_from_string('tests.Basic','basic','test1') diff --git a/tests/test_worker.py b/tests/test_worker.py index 76455db..164be9c 100644 --- a/tests/test_worker.py +++ b/tests/test_worker.py @@ -53,12 +53,12 @@ def test_processed(self): worker.processed() assert self.redis.exists("resque:stat:processed") assert self.redis.exists("resque:stat:processed:%s" % name) - 
assert self.redis.get("resque:stat:processed") == str(1) - assert self.redis.get("resque:stat:processed:%s" % name) == str(1) + assert self.redis.get("resque:stat:processed").decode() == str(1) + assert self.redis.get("resque:stat:processed:%s" % name).decode() == str(1) assert worker.get_processed() == 1 worker.processed() - assert self.redis.get("resque:stat:processed") == str(2) - assert self.redis.get("resque:stat:processed:%s" % name) == str(2) + assert self.redis.get("resque:stat:processed").decode() == str(2) + assert self.redis.get("resque:stat:processed:%s" % name).decode() == str(2) assert worker.get_processed() == 2 def test_failed(self): @@ -67,12 +67,12 @@ def test_failed(self): worker.failed() assert self.redis.exists("resque:stat:failed") assert self.redis.exists("resque:stat:failed:%s" % name) - assert self.redis.get("resque:stat:failed") == str(1) - assert self.redis.get("resque:stat:failed:%s" % name) == str(1) + assert self.redis.get("resque:stat:failed").decode() == str(1) + assert self.redis.get("resque:stat:failed:%s" % name).decode() == str(1) assert worker.get_failed() == 1 worker.failed() - assert self.redis.get("resque:stat:failed") == str(2) - assert self.redis.get("resque:stat:failed:%s" % name) == str(2) + assert self.redis.get("resque:stat:failed").decode() == str(2) + assert self.redis.get("resque:stat:failed:%s" % name).decode() == str(2) assert worker.get_failed() == 2 def test_process(self): @@ -111,8 +111,8 @@ def test_job_failure(self): worker.process() name = "%s:%s:%s" % (os.uname()[1],os.getpid(),'basic') assert not self.redis.get('resque:worker:%s' % worker) - assert self.redis.get("resque:stat:failed") == str(1) - assert self.redis.get("resque:stat:failed:%s" % name) == str(1) + assert self.redis.get("resque:stat:failed").decode() == str(1) + assert self.redis.get("resque:stat:failed:%s" % name).decode() == str(1) def test_get_job(self): worker = Worker(['basic']) From 29e580351644a4a5085b18c789adaddb0428df4a Mon Sep 17 
00:00:00 2001 From: John Anderson Date: Sat, 13 Jul 2013 12:17:44 -0700 Subject: [PATCH 078/102] support py3 --- pyres/__init__.py | 8 +++++--- tests/test_schedule.py | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index bd589f0..bea6051 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -312,7 +312,7 @@ def delayed_queue_schedule_size(self): size = 0 length = self.redis.zcard('resque:delayed_queue_schedule') for i in self.redis.zrange('resque:delayed_queue_schedule',0,length): - size += self.delayed_timestamp_size(i) + size += self.delayed_timestamp_size(i.decode()) return size def delayed_timestamp_size(self, timestamp): @@ -326,7 +326,9 @@ def next_delayed_timestamp(self): timestamp = None if array: timestamp = array[0] - return timestamp + + if timestamp: + return timestamp.decode() def next_item_for_timestamp(self, timestamp): #key = int(time.mktime(timestamp.timetuple())) @@ -334,7 +336,7 @@ def next_item_for_timestamp(self, timestamp): ret = self.redis.lpop(key) item = None if ret: - item = ResQ.decode(ret) + item = ResQ.decode(ret.decode()) if self.redis.llen(key) == 0: self.redis.delete(key) self.redis.zrem('resque:delayed_queue_schedule', timestamp) diff --git a/tests/test_schedule.py b/tests/test_schedule.py index 27b21fb..903c825 100644 --- a/tests/test_schedule.py +++ b/tests/test_schedule.py @@ -73,4 +73,4 @@ def test_schedule_shutdown(self): scheduler = Scheduler(self.resq) scheduler.schedule_shutdown(19,'') assert scheduler._shutdown - \ No newline at end of file + From 4ed56f475305fb66e2942c294f1e1c6023cace6f Mon Sep 17 00:00:00 2001 From: Wayne Witzel III Date: Sat, 13 Jul 2013 19:20:15 +0000 Subject: [PATCH 079/102] More test fixing --- pyres/failure/multiple.py | 4 +- pyres/horde.py | 2 +- pyres/utils.py | 98 --------------------------------------- 3 files changed, 3 insertions(+), 101 deletions(-) delete mode 100644 pyres/utils.py diff --git a/pyres/failure/multiple.py 
b/pyres/failure/multiple.py index e4d05f7..6362363 100644 --- a/pyres/failure/multiple.py +++ b/pyres/failure/multiple.py @@ -1,5 +1,5 @@ -from base import BaseBackend -from redis import RedisBackend +from pyres.failure.base import BaseBackend +from pyres.failure.redis import RedisBackend class MultipleBackend(BaseBackend): """Extends ``BaseBackend`` to provide support for delegating calls to multiple diff --git a/pyres/horde.py b/pyres/horde.py index 43bd2d6..b3462de 100644 --- a/pyres/horde.py +++ b/pyres/horde.py @@ -10,7 +10,7 @@ import logging.handlers from pyres import ResQ, Stat, get_logging_handler, special_log_file from pyres.exceptions import NoQueueError -from pyres.utils import OrderedDict +from collections import OrderedDict from pyres.job import Job from pyres.compat import string_types import pyres.json_parser as json diff --git a/pyres/utils.py b/pyres/utils.py deleted file mode 100644 index d640c11..0000000 --- a/pyres/utils.py +++ /dev/null @@ -1,98 +0,0 @@ -from UserDict import DictMixin - -class OrderedDict(dict, DictMixin): - - def __init__(self, *args, **kwds): - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__end - except AttributeError: - self.clear() - self.update(*args, **kwds) - - def clear(self): - self.__end = end = [] - end += [None, end, end] # sentinel node for doubly linked list - self.__map = {} # key --> [key, prev, next] - dict.clear(self) - - def __setitem__(self, key, value): - if key not in self: - end = self.__end - curr = end[1] - curr[2] = end[1] = self.__map[key] = [key, curr, end] - dict.__setitem__(self, key, value) - - def __delitem__(self, key): - dict.__delitem__(self, key) - key, prev, next = self.__map.pop(key) - prev[2] = next - next[1] = prev - - def __iter__(self): - end = self.__end - curr = end[2] - while curr is not end: - yield curr[0] - curr = curr[2] - - def __reversed__(self): - end = self.__end - curr = end[1] - while curr is not end: - yield curr[0] 
- curr = curr[1] - - def popitem(self, last=True): - if not self: - raise KeyError('dictionary is empty') - key = reversed(self).next() if last else iter(self).next() - value = self.pop(key) - return key, value - - def __reduce__(self): - items = [[k, self[k]] for k in self] - tmp = self.__map, self.__end - del self.__map, self.__end - inst_dict = vars(self).copy() - self.__map, self.__end = tmp - if inst_dict: - return (self.__class__, (items,), inst_dict) - return self.__class__, (items,) - - def keys(self): - return list(self) - - setdefault = DictMixin.setdefault - update = DictMixin.update - pop = DictMixin.pop - values = DictMixin.values - items = DictMixin.items - iterkeys = DictMixin.iterkeys - itervalues = DictMixin.itervalues - iteritems = DictMixin.iteritems - - def __repr__(self): - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, self.items()) - - def copy(self): - return self.__class__(self) - - @classmethod - def fromkeys(cls, iterable, value=None): - d = cls() - for key in iterable: - d[key] = value - return d - - def __eq__(self, other): - if isinstance(other, OrderedDict): - return len(self)==len(other) and \ - all(p==q for p, q in zip(self.items(), other.items())) - return dict.__eq__(self, other) - - def __ne__(self, other): - return not self == other From 5a418104cbeecf92f69434a51d15cb81288d5859 Mon Sep 17 00:00:00 2001 From: Wayne Witzel III Date: Sat, 13 Jul 2013 19:49:34 +0000 Subject: [PATCH 080/102] Final fixes for py3 compat --- pyres/__init__.py | 12 ++++++------ pyres/failure/redis.py | 3 +-- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index bea6051..362f2dd 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -159,7 +159,7 @@ def pop(self, queues, timeout=10): timeout=timeout) if ret: key, ret = ret - return key[13:].decode(), ResQ.decode(ret.decode()) # trim "resque:queue:" + return key[13:].decode(), ResQ.decode(ret) # 
trim "resque:queue:" else: return None, None @@ -336,7 +336,7 @@ def next_item_for_timestamp(self, timestamp): ret = self.redis.lpop(key) item = None if ret: - item = ResQ.decode(ret.decode()) + item = ResQ.decode(ret) if self.redis.llen(key) == 0: self.redis.delete(key) self.redis.zrem('resque:delayed_queue_schedule', timestamp) @@ -348,10 +348,10 @@ def encode(cls, item): @classmethod def decode(cls, item): - if isinstance(item, string_types): - ret = json.loads(item) - return ret - return None + if not isinstance(item, string_types): + item = item.decode() + ret = json.loads(item) + return ret @classmethod def _enqueue(cls, klass, *args): diff --git a/pyres/failure/redis.py b/pyres/failure/redis.py index 0b44739..5fe71ee 100644 --- a/pyres/failure/redis.py +++ b/pyres/failure/redis.py @@ -34,8 +34,7 @@ def all(cls, resq, start=0, count=1): ret_list = [] for i in items: - converted = i.decode() - failure = ResQ.decode(converted) + failure = ResQ.decode(i) failure['redis_value'] = b64encode(i) ret_list.append(failure) return ret_list From 06442cfa26ed72b753f94c87344714aaaeb16a77 Mon Sep 17 00:00:00 2001 From: Wayne Witzel III Date: Sat, 13 Jul 2013 20:16:34 +0000 Subject: [PATCH 081/102] Fixed py26 support --- pyres/compat.py | 1 + pyres/horde.py | 5 ++++- setup.py | 14 ++++++++++---- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/pyres/compat.py b/pyres/compat.py index c39fd3f..20732bf 100644 --- a/pyres/compat.py +++ b/pyres/compat.py @@ -8,6 +8,7 @@ # True if we are running on Python 3. 
PY3 = sys.version_info[0] == 3 +PY26 = sys.version_info[0:2] == (2,6) if PY3: # pragma: no cover string_types = str, diff --git a/pyres/horde.py b/pyres/horde.py index b3462de..b150c93 100644 --- a/pyres/horde.py +++ b/pyres/horde.py @@ -10,7 +10,10 @@ import logging.handlers from pyres import ResQ, Stat, get_logging_handler, special_log_file from pyres.exceptions import NoQueueError -from collections import OrderedDict +try: + from collections import OrderedDict +except ImportError: + from ordereddict import OrderedDict from pyres.job import Job from pyres.compat import string_types import pyres.json_parser as json diff --git a/setup.py b/setup.py index e404e2f..b2bc918 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,13 @@ from setuptools import setup, find_packages +from pyres.compat import PY26 + +install_requires=[ + item for item in + open("requirements.txt").read().split("\n") + if item], + +if PY26: + install_requires.append('ordereddict') version='1.4.2' setup( @@ -20,10 +29,7 @@ pyres_scheduler=pyres.scripts:pyres_scheduler pyres_worker=pyres.scripts:pyres_worker """, - install_requires=[ - item for item in - open("requirements.txt").read().split("\n") - if item], + install_requires=install_requires, classifiers = [ 'Development Status :: 4 - Beta', 'Environment :: Console', From 1a73d1e1916b2ad85d5d1b7e58bbfdd39648cb1b Mon Sep 17 00:00:00 2001 From: Wayne Witzel III Date: Sat, 13 Jul 2013 20:30:50 +0000 Subject: [PATCH 082/102] fixes for py26, added py33 to travis --- .travis.yml | 6 +++--- setup.py | 28 ++++++++++++++++++++++------ 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index c8a616d..66b134f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,10 +2,10 @@ language: python python: - "2.6" - "2.7" + - "3.3" # - "pypy" # command to install dependencies install: - - pip install -r requirements-test.txt --use-mirrors - - pip install -r requirements.txt --use-mirrors + - python setup.py install # command to run 
tests -script: nosetests +script: python setup.py test diff --git a/setup.py b/setup.py index b2bc918..7732582 100644 --- a/setup.py +++ b/setup.py @@ -1,13 +1,27 @@ +import sys from setuptools import setup, find_packages +from setuptools.command.test import test as TestCommand from pyres.compat import PY26 -install_requires=[ - item for item in - open("requirements.txt").read().split("\n") - if item], +requires=[ + item for item in + open("requirements.txt").read().split("\n") + if item] if PY26: - install_requires.append('ordereddict') + requires.append('ordereddict') + +class PyTest(TestCommand): + def finalize_options(self): + TestCommand.finalize_options(self) + self.test_args = [] + self.test_suite = True + + def run_tests(self): + #import here, cause outside the eggs aren't loaded + import pytest + result = pytest.main(self.test_args) + sys.exit(result) version='1.4.2' setup( @@ -29,7 +43,9 @@ pyres_scheduler=pyres.scripts:pyres_scheduler pyres_worker=pyres.scripts:pyres_worker """, - install_requires=install_requires, + tests_require=requires + ['pytest',], + cmdclass={'test': PyTest}, + install_requires=requires, classifiers = [ 'Development Status :: 4 - Beta', 'Environment :: Console', From f7c43084f558ae6921625b8f17c200c156b6424c Mon Sep 17 00:00:00 2001 From: Wayne Witzel III Date: Sat, 13 Jul 2013 20:34:49 +0000 Subject: [PATCH 083/102] Fixing import issue with compat.PY26 --- pyres/compat.py | 1 - setup.py | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/pyres/compat.py b/pyres/compat.py index 20732bf..c39fd3f 100644 --- a/pyres/compat.py +++ b/pyres/compat.py @@ -8,7 +8,6 @@ # True if we are running on Python 3. 
PY3 = sys.version_info[0] == 3 -PY26 = sys.version_info[0:2] == (2,6) if PY3: # pragma: no cover string_types = str, diff --git a/setup.py b/setup.py index 7732582..bb79183 100644 --- a/setup.py +++ b/setup.py @@ -1,14 +1,13 @@ import sys from setuptools import setup, find_packages from setuptools.command.test import test as TestCommand -from pyres.compat import PY26 requires=[ item for item in open("requirements.txt").read().split("\n") if item] -if PY26: +if sys.version_info[0:2] == (2,6): requires.append('ordereddict') class PyTest(TestCommand): From 401d963eeef7100616ce94748c9f41108b6a05f4 Mon Sep 17 00:00:00 2001 From: Wayne Witzel III Date: Mon, 15 Jul 2013 09:24:51 -0700 Subject: [PATCH 084/102] Update README.markdown --- README.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.markdown b/README.markdown index 0af6efb..ef8d07c 100644 --- a/README.markdown +++ b/README.markdown @@ -10,7 +10,7 @@ Because of some differences between ruby and python, there are a couple of place ## Travis CI -Currently, pyres is being tested via travis ci for python version 2.6, 2.7, and pypy: +Currently, pyres is being tested via travis ci for python version 2.6, 2.7, and 3.3: [![Build Status](https://secure.travis-ci.org/binarydud/pyres.png)](http://travis-ci.org/binarydud/pyres) ## Running Tests From c6c965313d012cda27b9fd078e5691f8d7aa3f6b Mon Sep 17 00:00:00 2001 From: Bamco Date: Tue, 16 Jul 2013 17:37:31 +0300 Subject: [PATCH 085/102] Removed unnecessary lines in ResQ docstring. --- pyres/__init__.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index 362f2dd..3326b6e 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -121,14 +121,6 @@ class ResQ(object): ``password`` -- The password, if required, of your Redis server. Default is "None". - ``timeout`` -- The timeout keyword is in the signature, but is unused. Default is "None". 
- - ``retry_connection`` -- This keyword is in the signature but is deprecated. Default is "True". - - - Both ``timeout`` and ``retry_connection`` will be removed as the python-redis client - no longer uses them. - Example usage:: >>> from pyres import * From 3d0309e08c70622dc6c3cac683cb8294290b443f Mon Sep 17 00:00:00 2001 From: John Anderson Date: Thu, 18 Jul 2013 16:25:59 -0700 Subject: [PATCH 086/102] Update setup.py --- setup.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/setup.py b/setup.py index bb79183..966dad0 100644 --- a/setup.py +++ b/setup.py @@ -51,5 +51,8 @@ def run_tests(self): 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', + 'Programming Language :: Python :: 2.6', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3.3', 'Programming Language :: Python'], ) From fb8c8aa5fe53b7310e646d4f8d1373770988c3a3 Mon Sep 17 00:00:00 2001 From: kracekumar Date: Tue, 17 Sep 2013 22:03:20 +0530 Subject: [PATCH 087/102] Removed coverage.report --- .gitignore | 1 + coverage.report | 11 ----------- 2 files changed, 1 insertion(+), 11 deletions(-) delete mode 100644 coverage.report diff --git a/.gitignore b/.gitignore index 310a9ae..e3a0b9e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ *.pyc build/ .coverage +*.report *.egg-info logs/ dist/ diff --git a/coverage.report b/coverage.report deleted file mode 100644 index d9c33d7..0000000 --- a/coverage.report +++ /dev/null @@ -1,11 +0,0 @@ -Name Stmts Exec Cover Missing ------------------------------------------------- -pyres 138 131 94% 26, 39, 98, 133-134, 144-145 -pyres.exceptions 2 2 100% -pyres.failure 23 22 95% 41 -pyres.job 23 23 100% -pyres.worker 189 146 77% 66, 74, 84-112, 161, 179, 186, 230-241 ------------------------------------------------- -TOTAL 375 324 86% ----------------------------------------------------------------------- -Ran 32 tests in 0.884s \ No newline at end of file From 
d036a5d0b3b2e57174391b4d8aca9defff8b87f0 Mon Sep 17 00:00:00 2001 From: Paulo Sousa Date: Fri, 11 Oct 2013 16:06:04 -0300 Subject: [PATCH 088/102] Implementing password support on command line to worker, scheduler and manager --- pyres/horde.py | 4 ++-- pyres/scripts.py | 15 +++++++++------ pyres/worker.py | 4 ++-- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/pyres/horde.py b/pyres/horde.py index b150c93..6ea1eb3 100644 --- a/pyres/horde.py +++ b/pyres/horde.py @@ -373,8 +373,8 @@ def __str__(self): return '%s:%s:%s' % (hostname, self.pid, self.pool_size) @classmethod - def run(cls, pool_size=5, queues=[], server='localhost:6379', logging_level=logging.INFO, log_file=None): - worker = cls(pool_size=pool_size, queues=queues, server=server, logging_level=logging_level, log_file=log_file) + def run(cls, pool_size=5, queues=[], server='localhost:6379', password=None, logging_level=logging.INFO, log_file=None): + worker = cls(pool_size=pool_size, queues=queues, server=server, password=password, logging_level=logging_level, log_file=log_file) worker.work() #if __name__ == "__main__": diff --git a/pyres/scripts.py b/pyres/scripts.py index 55c0854..659a77f 100644 --- a/pyres/scripts.py +++ b/pyres/scripts.py @@ -13,7 +13,8 @@ def pyres_manager(): parser = OptionParser(usage=usage) #parser.add_option("-q", dest="queue_list") parser.add_option("--host", dest="host", default="localhost") - parser.add_option("--port",dest="port",type="int", default=6379) + parser.add_option("--port", dest="port",type="int", default=6379) + parser.add_option("--password", dest="password", default=None) parser.add_option("-i", '--interval', dest='interval', default=None, help='the default time interval to sleep between runs') parser.add_option('-l', '--log-level', dest='log_level', default='info', help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. 
Defaults to "info" if parameter not specified.') parser.add_option("--pool", type="int", dest="pool_size", default=1, help="Number of minions to spawn under the manager.") @@ -36,7 +37,7 @@ def pyres_manager(): queues = args[0].split(',') server = '%s:%s' % (options.host,options.port) - Khan.run(pool_size=options.pool_size, queues=queues, server=server, logging_level=log_level, log_file=options.logfile) + Khan.run(pool_size=options.pool_size, queues=queues, server=server, password=password, logging_level=log_level, log_file=options.logfile) def pyres_scheduler(): @@ -44,7 +45,8 @@ def pyres_scheduler(): parser = OptionParser(usage=usage) #parser.add_option("-q", dest="queue_list") parser.add_option("--host", dest="host", default="localhost") - parser.add_option("--port",dest="port",type="int", default=6379) + parser.add_option("--port", dest="port",type="int", default=6379) + parser.add_option("--password", dest="password", default=None) parser.add_option('-l', '--log-level', dest='log_level', default='info', help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.') parser.add_option('-f', dest='logfile', help='If present, a logfile will be used. 
"stderr", "stdout", and "syslog" are all special values.') parser.add_option('-p', dest='pidfile', help='If present, a pidfile will be used.') @@ -54,7 +56,7 @@ def pyres_scheduler(): setup_logging(procname="pyres_scheduler", log_level=log_level, filename=options.logfile) setup_pidfile(options.pidfile) server = '%s:%s' % (options.host, options.port) - Scheduler.run(server) + Scheduler.run(server, password) def pyres_worker(): @@ -62,7 +64,8 @@ def pyres_worker(): parser = OptionParser(usage=usage) parser.add_option("--host", dest="host", default="localhost") - parser.add_option("--port",dest="port",type="int", default=6379) + parser.add_option("--port", dest="port",type="int", default=6379) + parser.add_option("--password", dest="password", default=None) parser.add_option("-i", '--interval', dest='interval', default=None, help='the default time interval to sleep between runs') parser.add_option('-l', '--log-level', dest='log_level', default='info', help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.') parser.add_option('-f', dest='logfile', help='If present, a logfile will be used. 
"stderr", "stdout", and "syslog" are all special values.') @@ -86,4 +89,4 @@ def pyres_worker(): queues = args[0].split(',') server = '%s:%s' % (options.host,options.port) - Worker.run(queues, server, interval, timeout=timeout) + Worker.run(queues, server, password, interval, timeout=timeout) diff --git a/pyres/worker.py b/pyres/worker.py index 952df6a..fc42b12 100644 --- a/pyres/worker.py +++ b/pyres/worker.py @@ -341,8 +341,8 @@ def worker_pids(cls): return [] @classmethod - def run(cls, queues, server="localhost:6379", interval=None, timeout=None): - worker = cls(queues=queues, server=server, timeout=timeout) + def run(cls, queues, server="localhost:6379", password=None, interval=None, timeout=None): + worker = cls(queues=queues, server=server, password=password, timeout=timeout) if interval is not None: worker.work(interval) else: From 4b747eb57c043f2c1aa28ef5a8d10eaba409e9dc Mon Sep 17 00:00:00 2001 From: Paulo Sousa Date: Mon, 14 Oct 2013 14:27:49 -0300 Subject: [PATCH 089/102] getting password option to password var --- pyres/scripts.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyres/scripts.py b/pyres/scripts.py index 659a77f..f59bf00 100644 --- a/pyres/scripts.py +++ b/pyres/scripts.py @@ -37,6 +37,7 @@ def pyres_manager(): queues = args[0].split(',') server = '%s:%s' % (options.host,options.port) + password = options.password Khan.run(pool_size=options.pool_size, queues=queues, server=server, password=password, logging_level=log_level, log_file=options.logfile) @@ -56,6 +57,7 @@ def pyres_scheduler(): setup_logging(procname="pyres_scheduler", log_level=log_level, filename=options.logfile) setup_pidfile(options.pidfile) server = '%s:%s' % (options.host, options.port) + password = options.password Scheduler.run(server, password) @@ -89,4 +91,5 @@ def pyres_worker(): queues = args[0].split(',') server = '%s:%s' % (options.host,options.port) + password = options.password Worker.run(queues, server, password, interval, timeout=timeout) From 
b3ad393f22b0f6d024cfabffedf64d600837dbb1 Mon Sep 17 00:00:00 2001 From: Paulo Sousa Date: Mon, 14 Oct 2013 16:39:24 -0300 Subject: [PATCH 090/102] Receiving command line interval properly on manager --- pyres/horde.py | 4 ++-- pyres/scripts.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyres/horde.py b/pyres/horde.py index 6ea1eb3..e156eb7 100644 --- a/pyres/horde.py +++ b/pyres/horde.py @@ -373,8 +373,8 @@ def __str__(self): return '%s:%s:%s' % (hostname, self.pid, self.pool_size) @classmethod - def run(cls, pool_size=5, queues=[], server='localhost:6379', password=None, logging_level=logging.INFO, log_file=None): - worker = cls(pool_size=pool_size, queues=queues, server=server, password=password, logging_level=logging_level, log_file=log_file) + def run(cls, pool_size=5, queues=[], server='localhost:6379', password=None, interval=2, logging_level=logging.INFO, log_file=None): + worker = cls(pool_size=pool_size, queues=queues, server=server, password=password, interval=interval, logging_level=logging_level, log_file=log_file) worker.work() #if __name__ == "__main__": diff --git a/pyres/scripts.py b/pyres/scripts.py index f59bf00..26e07d7 100644 --- a/pyres/scripts.py +++ b/pyres/scripts.py @@ -38,7 +38,7 @@ def pyres_manager(): queues = args[0].split(',') server = '%s:%s' % (options.host,options.port) password = options.password - Khan.run(pool_size=options.pool_size, queues=queues, server=server, password=password, logging_level=log_level, log_file=options.logfile) + Khan.run(pool_size=options.pool_size, queues=queues, server=server, password=password, interval=interval, logging_level=log_level, log_file=options.logfile) def pyres_scheduler(): From d492bc3e412b74941697c59aa045dd344b63db63 Mon Sep 17 00:00:00 2001 From: Paulo Sousa Date: Mon, 14 Oct 2013 17:13:08 -0300 Subject: [PATCH 091/102] passing interval to work method, not init --- pyres/horde.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyres/horde.py 
b/pyres/horde.py index e156eb7..5db0dc9 100644 --- a/pyres/horde.py +++ b/pyres/horde.py @@ -374,8 +374,8 @@ def __str__(self): @classmethod def run(cls, pool_size=5, queues=[], server='localhost:6379', password=None, interval=2, logging_level=logging.INFO, log_file=None): - worker = cls(pool_size=pool_size, queues=queues, server=server, password=password, interval=interval, logging_level=logging_level, log_file=log_file) - worker.work() + worker = cls(pool_size=pool_size, queues=queues, server=server, password=password, logging_level=logging_level, log_file=log_file) + worker.work(interval=interval) #if __name__ == "__main__": # k = Khan() From e25091be367bbdcfe2d9393684b9ea84b0292ab7 Mon Sep 17 00:00:00 2001 From: Paulo Sousa Date: Tue, 15 Oct 2013 10:46:49 -0300 Subject: [PATCH 092/102] implementing minions interval on manager command line --- pyres/horde.py | 20 ++++++++++++++------ pyres/scripts.py | 16 +++++++++++----- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/pyres/horde.py b/pyres/horde.py index 5db0dc9..ce23210 100644 --- a/pyres/horde.py +++ b/pyres/horde.py @@ -33,7 +33,7 @@ def setup_logging(procname, namespace='', log_level=logging.INFO, log_file=None) return logger class Minion(multiprocessing.Process): - def __init__(self, queues, server, password, log_level=logging.INFO, log_path=None): + def __init__(self, queues, server, password, log_level=logging.INFO, log_path=None, interval=5): multiprocessing.Process.__init__(self, name='Minion') #format = '%(asctime)s %(levelname)s %(filename)s-%(lineno)d: %(message)s' @@ -48,6 +48,7 @@ def __init__(self, queues, server, password, log_level=logging.INFO, log_path=No self.hostname = os.uname()[1] self.server = server self.password = password + self.interval = interval self.log_level = log_level self.log_path = log_path @@ -142,6 +143,7 @@ def work(self, interval=5): if job: self.process(job) else: + self.logger.debug('minion sleeping for: %i secs' % interval) time.sleep(interval) 
self.unregister_minion() @@ -167,7 +169,7 @@ def run(self): raise Exception("Bad server argument") - self.work() + self.work(self.interval) #while True: # job = self.q.get() # print 'pid: %s is running %s ' % (self.pid,job) @@ -179,7 +181,8 @@ class Khan(object): 'REMOVE': '_remove_minion', 'SHUTDOWN': '_schedule_shutdown' } - def __init__(self, pool_size=5, queues=[], server='localhost:6379', password=None, logging_level=logging.INFO, log_file=None): + def __init__(self, pool_size=5, queues=[], server='localhost:6379', password=None, logging_level=logging.INFO, + log_file=None, minions_interval=5): #super(Khan,self).__init__(queues=queues,server=server,password=password) self._shutdown = False self.pool_size = int(pool_size) @@ -193,6 +196,7 @@ def __init__(self, pool_size=5, queues=[], server='localhost:6379', password=Non self.password = password self.logging_level = logging_level self.log_file = log_file + self.minions_interval = minions_interval #self._workers = list() @@ -307,7 +311,8 @@ def _add_minion(self): log_path = os.path.dirname(self.log_file) else: log_path = None - m = Minion(self.queues, self.server, self.password, log_level=self.logging_level, log_path=log_path) + m = Minion(self.queues, self.server, self.password, interval=self.minions_interval, + log_level=self.logging_level, log_path=log_path) m.start() self._workers[m.pid] = m if hasattr(self,'logger'): @@ -365,6 +370,7 @@ def work(self, interval=2): break #get job else: + self.logger.debug('manager sleeping for: %i secs' % interval) time.sleep(interval) self.unregister_khan() @@ -373,8 +379,10 @@ def __str__(self): return '%s:%s:%s' % (hostname, self.pid, self.pool_size) @classmethod - def run(cls, pool_size=5, queues=[], server='localhost:6379', password=None, interval=2, logging_level=logging.INFO, log_file=None): - worker = cls(pool_size=pool_size, queues=queues, server=server, password=password, logging_level=logging_level, log_file=log_file) + def run(cls, pool_size=5, queues=[], 
server='localhost:6379', password=None, interval=2, + logging_level=logging.INFO, log_file=None, minions_interval=5): + worker = cls(pool_size=pool_size, queues=queues, server=server, password=password, logging_level=logging_level, + log_file=log_file, minions_interval=minions_interval) worker.work(interval=interval) #if __name__ == "__main__": diff --git a/pyres/scripts.py b/pyres/scripts.py index 26e07d7..30bc4a6 100644 --- a/pyres/scripts.py +++ b/pyres/scripts.py @@ -15,7 +15,8 @@ def pyres_manager(): parser.add_option("--host", dest="host", default="localhost") parser.add_option("--port", dest="port",type="int", default=6379) parser.add_option("--password", dest="password", default=None) - parser.add_option("-i", '--interval', dest='interval', default=None, help='the default time interval to sleep between runs') + parser.add_option("-i", '--interval', dest='manager_interval', default=None, help='the default time interval to sleep between runs - manager') + parser.add_option("--minions_interval", dest='minions_interval', default=None, help='the default time interval to sleep between runs - minions') parser.add_option('-l', '--log-level', dest='log_level', default='info', help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.') parser.add_option("--pool", type="int", dest="pool_size", default=1, help="Number of minions to spawn under the manager.") parser.add_option('-f', dest='logfile', help='If present, a logfile will be used. 
"stderr", "stdout", and "syslog" are all special values.') @@ -31,14 +32,19 @@ def pyres_manager(): setup_pidfile(options.pidfile) - interval = options.interval - if interval is not None: - interval = float(interval) + manager_interval = options.manager_interval + if manager_interval is not None: + manager_interval = float(manager_interval) + + minions_interval = options.minions_interval + if minions_interval is not None: + minions_interval = float(minions_interval) queues = args[0].split(',') server = '%s:%s' % (options.host,options.port) password = options.password - Khan.run(pool_size=options.pool_size, queues=queues, server=server, password=password, interval=interval, logging_level=log_level, log_file=options.logfile) + Khan.run(pool_size=options.pool_size, queues=queues, server=server, password=password, interval=interval, + logging_level=log_level, log_file=options.logfile, minions_interval=minions_interval) def pyres_scheduler(): From 004efa61a9774c98696660de4259bd14fcbbd155 Mon Sep 17 00:00:00 2001 From: Paulo Sousa Date: Tue, 15 Oct 2013 11:25:54 -0300 Subject: [PATCH 093/102] replacing interval for manager_interval --- pyres/scripts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyres/scripts.py b/pyres/scripts.py index 30bc4a6..75af6b7 100644 --- a/pyres/scripts.py +++ b/pyres/scripts.py @@ -43,7 +43,7 @@ def pyres_manager(): queues = args[0].split(',') server = '%s:%s' % (options.host,options.port) password = options.password - Khan.run(pool_size=options.pool_size, queues=queues, server=server, password=password, interval=interval, + Khan.run(pool_size=options.pool_size, queues=queues, server=server, password=password, interval=manager_interval, logging_level=log_level, log_file=options.logfile, minions_interval=minions_interval) From 51bcf4264cdb47eef5503b7f27366ee2befc9d06 Mon Sep 17 00:00:00 2001 From: Paulo Sousa Date: Tue, 15 Oct 2013 13:56:56 -0300 Subject: [PATCH 094/102] concat minions logs // implementing minion max 
process jobs --- pyres/horde.py | 26 +++++++++++++++++++++----- pyres/scripts.py | 9 ++++++--- 2 files changed, 27 insertions(+), 8 deletions(-) diff --git a/pyres/horde.py b/pyres/horde.py index ce23210..6c0d0ae 100644 --- a/pyres/horde.py +++ b/pyres/horde.py @@ -33,7 +33,8 @@ def setup_logging(procname, namespace='', log_level=logging.INFO, log_file=None) return logger class Minion(multiprocessing.Process): - def __init__(self, queues, server, password, log_level=logging.INFO, log_path=None, interval=5): + def __init__(self, queues, server, password, log_level=logging.INFO, log_path=None, interval=5, concact_logs=False, + max_jobs=None): multiprocessing.Process.__init__(self, name='Minion') #format = '%(asctime)s %(levelname)s %(filename)s-%(lineno)d: %(message)s' @@ -53,6 +54,7 @@ def __init__(self, queues, server, password, log_level=logging.INFO, log_path=No self.log_level = log_level self.log_path = log_path self.log_file = None + self.concact_logs = concact_logs def prune_dead_workers(self): pass @@ -133,16 +135,24 @@ def unregister_minion(self): def work(self, interval=5): self.startup() + cur_job = 0 while True: setproctitle('pyres_minion:%s: waiting for job on: %s' % (os.getppid(),self.queues)) self.logger.info('waiting on job') if self._shutdown: self.logger.info('shutdown scheduled') break + if self.max_jobs and self.max_jobs > cur_job: + self.logger.debug('max_jobs reached on %s: %s' % (os.getppid(), cur_job)) + self.logger.debug('minion sleeping for: %i secs' % interval) + time.sleep(interval) + cur_job = 0 job = self.reserve() if job: self.process(job) + cur_job = cur_job + 1 else: + cur_job = 0 self.logger.debug('minion sleeping for: %i secs' % interval) time.sleep(interval) self.unregister_minion() @@ -156,6 +166,8 @@ def run(self): if self.log_path: if special_log_file(self.log_path): self.log_file = self.log_path + elif self.concat_logs: + self.log_file = os.path.join(self.log_path, 'minion.log') else: self.log_file = os.path.join(self.log_path, 
'minion-%s.log' % self.pid) namespace = 'minion:%s' % self.pid @@ -182,7 +194,7 @@ class Khan(object): 'SHUTDOWN': '_schedule_shutdown' } def __init__(self, pool_size=5, queues=[], server='localhost:6379', password=None, logging_level=logging.INFO, - log_file=None, minions_interval=5): + log_file=None, minions_interval=5, minions_concact_logs=False, max_jobs=None): #super(Khan,self).__init__(queues=queues,server=server,password=password) self._shutdown = False self.pool_size = int(pool_size) @@ -197,6 +209,8 @@ def __init__(self, pool_size=5, queues=[], server='localhost:6379', password=Non self.logging_level = logging_level self.log_file = log_file self.minions_interval = minions_interval + self.minions_concact_logs = minions_concact_logs + self.max_jobs = max_jobs #self._workers = list() @@ -312,7 +326,8 @@ def _add_minion(self): else: log_path = None m = Minion(self.queues, self.server, self.password, interval=self.minions_interval, - log_level=self.logging_level, log_path=log_path) + log_level=self.logging_level, log_path=log_path, concact_logs=self.minions_concact_logs, + max_jobs=self.max_jobs) m.start() self._workers[m.pid] = m if hasattr(self,'logger'): @@ -380,9 +395,10 @@ def __str__(self): @classmethod def run(cls, pool_size=5, queues=[], server='localhost:6379', password=None, interval=2, - logging_level=logging.INFO, log_file=None, minions_interval=5): + logging_level=logging.INFO, log_file=None, minions_interval=5, minions_concact_logs=False, max_jobs=None): worker = cls(pool_size=pool_size, queues=queues, server=server, password=password, logging_level=logging_level, - log_file=log_file, minions_interval=minions_interval) + log_file=log_file, minions_interval=minions_interval, minions_concact_logs=minions_concact_logs, + max_jobs=max_jobs) worker.work(interval=interval) #if __name__ == "__main__": diff --git a/pyres/scripts.py b/pyres/scripts.py index 75af6b7..6d9a0af 100644 --- a/pyres/scripts.py +++ b/pyres/scripts.py @@ -19,8 +19,10 @@ def 
pyres_manager(): parser.add_option("--minions_interval", dest='minions_interval', default=None, help='the default time interval to sleep between runs - minions') parser.add_option('-l', '--log-level', dest='log_level', default='info', help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.') parser.add_option("--pool", type="int", dest="pool_size", default=1, help="Number of minions to spawn under the manager.") + parser.add_option("-j", "--process_max_jobs", dest="max_jobs", default=None, help='how many jobs should be processed on worker run.') parser.add_option('-f', dest='logfile', help='If present, a logfile will be used. "stderr", "stdout", and "syslog" are all special values.') parser.add_option('-p', dest='pidfile', help='If present, a pidfile will be used.') + parser.add_option("--concat_minions_log", action="store_true", dest="concat_minions_log", help='Concat all minions logs on same file.') (options,args) = parser.parse_args() if len(args) != 1: @@ -29,7 +31,7 @@ def pyres_manager(): log_level = getattr(logging, options.log_level.upper(), 'INFO') #logging.basicConfig(level=log_level, format="%(asctime)s: %(levelname)s: %(message)s") - + concat_minions_log = options.concat_minions_log setup_pidfile(options.pidfile) manager_interval = options.manager_interval @@ -38,13 +40,14 @@ def pyres_manager(): minions_interval = options.minions_interval if minions_interval is not None: - minions_interval = float(minions_interval) + minions_interval = float(minions_interval) queues = args[0].split(',') server = '%s:%s' % (options.host,options.port) password = options.password Khan.run(pool_size=options.pool_size, queues=queues, server=server, password=password, interval=manager_interval, - logging_level=log_level, log_file=options.logfile, minions_interval=minions_interval) + logging_level=log_level, log_file=options.logfile, minions_interval=minions_interval, + 
concat_minions_log=concat_minions_log, max_jobs=options.max_jobs) def pyres_scheduler(): From 0da62c419564b3cf6b1303658c7a030af1cfa2b6 Mon Sep 17 00:00:00 2001 From: Paulo Sousa Date: Tue, 15 Oct 2013 15:52:49 -0300 Subject: [PATCH 095/102] fixing concat_minions_logs and max_jobs --- pyres/horde.py | 28 +++++++++++++++------------- pyres/scripts.py | 8 ++++---- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/pyres/horde.py b/pyres/horde.py index 6c0d0ae..b41b7a4 100644 --- a/pyres/horde.py +++ b/pyres/horde.py @@ -33,8 +33,8 @@ def setup_logging(procname, namespace='', log_level=logging.INFO, log_file=None) return logger class Minion(multiprocessing.Process): - def __init__(self, queues, server, password, log_level=logging.INFO, log_path=None, interval=5, concact_logs=False, - max_jobs=None): + def __init__(self, queues, server, password, log_level=logging.INFO, log_path=None, interval=5, concat_logs=False, + max_jobs=0): multiprocessing.Process.__init__(self, name='Minion') #format = '%(asctime)s %(levelname)s %(filename)s-%(lineno)d: %(message)s' @@ -54,7 +54,8 @@ def __init__(self, queues, server, password, log_level=logging.INFO, log_path=No self.log_level = log_level self.log_path = log_path self.log_file = None - self.concact_logs = concact_logs + self.concat_logs = concat_logs + self.max_jobs = max_jobs def prune_dead_workers(self): pass @@ -142,9 +143,10 @@ def work(self, interval=5): if self._shutdown: self.logger.info('shutdown scheduled') break - if self.max_jobs and self.max_jobs > cur_job: - self.logger.debug('max_jobs reached on %s: %s' % (os.getppid(), cur_job)) - self.logger.debug('minion sleeping for: %i secs' % interval) + self.logger.debug('max_jobs: %d cur_jobs: %d' % (self.max_jobs, cur_job)) + if (self.max_jobs > 0 and self.max_jobs < cur_job): + self.logger.debug('max_jobs reached on %s: %d' % (self.pid, cur_job)) + self.logger.debug('minion sleeping for: %d secs' % interval) time.sleep(interval) cur_job = 0 job = 
self.reserve() @@ -153,7 +155,7 @@ def work(self, interval=5): cur_job = cur_job + 1 else: cur_job = 0 - self.logger.debug('minion sleeping for: %i secs' % interval) + self.logger.debug('minion sleeping for: %d secs' % interval) time.sleep(interval) self.unregister_minion() @@ -194,7 +196,7 @@ class Khan(object): 'SHUTDOWN': '_schedule_shutdown' } def __init__(self, pool_size=5, queues=[], server='localhost:6379', password=None, logging_level=logging.INFO, - log_file=None, minions_interval=5, minions_concact_logs=False, max_jobs=None): + log_file=None, minions_interval=5, concat_minions_logs=False, max_jobs=0): #super(Khan,self).__init__(queues=queues,server=server,password=password) self._shutdown = False self.pool_size = int(pool_size) @@ -209,7 +211,7 @@ def __init__(self, pool_size=5, queues=[], server='localhost:6379', password=Non self.logging_level = logging_level self.log_file = log_file self.minions_interval = minions_interval - self.minions_concact_logs = minions_concact_logs + self.concat_minions_logs = concat_minions_logs self.max_jobs = max_jobs #self._workers = list() @@ -326,7 +328,7 @@ def _add_minion(self): else: log_path = None m = Minion(self.queues, self.server, self.password, interval=self.minions_interval, - log_level=self.logging_level, log_path=log_path, concact_logs=self.minions_concact_logs, + log_level=self.logging_level, log_path=log_path, concat_logs=self.concat_minions_logs, max_jobs=self.max_jobs) m.start() self._workers[m.pid] = m @@ -385,7 +387,7 @@ def work(self, interval=2): break #get job else: - self.logger.debug('manager sleeping for: %i secs' % interval) + self.logger.debug('manager sleeping for: %d secs' % interval) time.sleep(interval) self.unregister_khan() @@ -395,9 +397,9 @@ def __str__(self): @classmethod def run(cls, pool_size=5, queues=[], server='localhost:6379', password=None, interval=2, - logging_level=logging.INFO, log_file=None, minions_interval=5, minions_concact_logs=False, max_jobs=None): + 
logging_level=logging.INFO, log_file=None, minions_interval=5, concat_minions_logs=False, max_jobs=0): worker = cls(pool_size=pool_size, queues=queues, server=server, password=password, logging_level=logging_level, - log_file=log_file, minions_interval=minions_interval, minions_concact_logs=minions_concact_logs, + log_file=log_file, minions_interval=minions_interval, concat_minions_logs=concat_minions_logs, max_jobs=max_jobs) worker.work(interval=interval) diff --git a/pyres/scripts.py b/pyres/scripts.py index 6d9a0af..ff2d466 100644 --- a/pyres/scripts.py +++ b/pyres/scripts.py @@ -19,10 +19,10 @@ def pyres_manager(): parser.add_option("--minions_interval", dest='minions_interval', default=None, help='the default time interval to sleep between runs - minions') parser.add_option('-l', '--log-level', dest='log_level', default='info', help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.') parser.add_option("--pool", type="int", dest="pool_size", default=1, help="Number of minions to spawn under the manager.") - parser.add_option("-j", "--process_max_jobs", dest="max_jobs", default=None, help='how many jobs should be processed on worker run.') + parser.add_option("-j", "--process_max_jobs", dest="max_jobs", type=int, default=0, help='how many jobs should be processed on worker run.') parser.add_option('-f', dest='logfile', help='If present, a logfile will be used. 
"stderr", "stdout", and "syslog" are all special values.') parser.add_option('-p', dest='pidfile', help='If present, a pidfile will be used.') - parser.add_option("--concat_minions_log", action="store_true", dest="concat_minions_log", help='Concat all minions logs on same file.') + parser.add_option("--concat_minions_logs", action="store_true", dest="concat_minions_logs", help='Concat all minions logs on same file.') (options,args) = parser.parse_args() if len(args) != 1: @@ -31,7 +31,7 @@ def pyres_manager(): log_level = getattr(logging, options.log_level.upper(), 'INFO') #logging.basicConfig(level=log_level, format="%(asctime)s: %(levelname)s: %(message)s") - concat_minions_log = options.concat_minions_log + concat_minions_logs = options.concat_minions_logs setup_pidfile(options.pidfile) manager_interval = options.manager_interval @@ -47,7 +47,7 @@ def pyres_manager(): password = options.password Khan.run(pool_size=options.pool_size, queues=queues, server=server, password=password, interval=manager_interval, logging_level=log_level, log_file=options.logfile, minions_interval=minions_interval, - concat_minions_log=concat_minions_log, max_jobs=options.max_jobs) + concat_minions_logs=concat_minions_logs, max_jobs=options.max_jobs) def pyres_scheduler(): From 3ec8d1751d491243d2d43a710a47ad95d584c136 Mon Sep 17 00:00:00 2001 From: Danny Cosson Date: Mon, 28 Oct 2013 14:56:43 -0400 Subject: [PATCH 096/102] when getting delayed timestamps, limit the query to one result since only the first item in that list is ever used --- pyres/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index 3326b6e..cc366dc 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -314,7 +314,7 @@ def delayed_timestamp_size(self, timestamp): def next_delayed_timestamp(self): key = int(time.mktime(ResQ._current_time().timetuple())) array = self.redis.zrangebyscore('resque:delayed_queue_schedule', - '-inf', key) + '-inf', key, 
start=0, num=1) timestamp = None if array: timestamp = array[0] From fbc7bfe7730c8894edbd53e0500c236202dd6c11 Mon Sep 17 00:00:00 2001 From: Matt George Date: Wed, 16 Apr 2014 19:28:39 -0600 Subject: [PATCH 097/102] version bump for new release --- pyres/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index cc366dc..53bc6ec 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -1,4 +1,4 @@ -__version__ = '1.4.2' +__version__ = '1.5' from redis import Redis from pyres.compat import string_types diff --git a/setup.py b/setup.py index 966dad0..e0597ed 100644 --- a/setup.py +++ b/setup.py @@ -22,7 +22,7 @@ def run_tests(self): result = pytest.main(self.test_args) sys.exit(result) -version='1.4.2' +version='1.5' setup( name='pyres', version=version, From 3140ba04c35734b5ae69f4b1bf2b7877e2dd1547 Mon Sep 17 00:00:00 2001 From: Matt George Date: Wed, 16 Apr 2014 19:32:37 -0600 Subject: [PATCH 098/102] adding redis support to ci --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index 66b134f..cafbb59 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,3 +9,5 @@ install: - python setup.py install # command to run tests script: python setup.py test +services: + - redis-server From 49aebb24f8a58c6993ffeb1ea46add624c2706f7 Mon Sep 17 00:00:00 2001 From: Matt George Date: Wed, 16 Apr 2014 20:02:32 -0600 Subject: [PATCH 099/102] trying 3.4 support --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index cafbb59..3a1cd4e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,6 +3,7 @@ python: - "2.6" - "2.7" - "3.3" + - "3.4" # - "pypy" # command to install dependencies install: From 943a4a2808656a532fd9ab095b9df93833b81637 Mon Sep 17 00:00:00 2001 From: John Anderson Date: Wed, 6 Aug 2014 08:40:23 -0700 Subject: [PATCH 100/102] Always run after_perform, you should check 'failed' key --- pyres/job.py | 5 +++-- 1 file 
changed, 3 insertions(+), 2 deletions(-) diff --git a/pyres/job.py b/pyres/job.py index 8358494..4f4b547 100644 --- a/pyres/job.py +++ b/pyres/job.py @@ -82,7 +82,6 @@ def perform(self): payload_class.before_perform(metadata) return payload_class.perform(*args) except Exception as e: - check_after = False metadata["failed"] = True metadata["exception"] = e if not self.retry(payload_class, args): @@ -93,8 +92,10 @@ def perform(self): logging.exception("Retry scheduled after error in %s", self._payload) finally: after_perform = getattr(payload_class, "after_perform", None) - if after_perform and check_after: + + if after_perform: payload_class.after_perform(metadata) + delattr(payload_class,'resq') def fail(self, exception): From 1bc12b635d31dbfa2b166064307c2762627a1590 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Sun, 5 Oct 2014 23:01:34 -0700 Subject: [PATCH 101/102] Small tweaks to example.rst wold => world Add Python code block for syntax highlighting. --- docs/source/example.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/source/example.rst b/docs/source/example.rst index 4c67e23..33d8c93 100644 --- a/docs/source/example.rst +++ b/docs/source/example.rst @@ -1,7 +1,7 @@ Example ========= -Let's take a real wold example of a blog where comments need to be checked for +Let's take a real world example of a blog where comments need to be checked for spam. When the comment is saved in the database, we create a job in the queue with that comment data. Let's take a django model in this case. @@ -33,7 +33,9 @@ You can convert your existing class to be compatible with pyres. All you need to do is add a :attr:`queue` attribute and define a :meth:`perform` method on the class. -To insert a job into the queue you need to do something like this:: +To insert a job into the queue you need to do something like this: + +.. 
code-block:: python >>> from pyres import ResQ >>> r = ResQ() @@ -47,4 +49,3 @@ In the **scripts** folder there is an executable:: Just pass a comma separated list of queues the worker should poll. - From c100e3f4384242351bc5cedb06589f5c8195000b Mon Sep 17 00:00:00 2001 From: toby cabot Date: Wed, 13 May 2015 15:44:46 -0400 Subject: [PATCH 102/102] handle jobs queued by Ruby Resque with no module Ruby compatibility: Resque sends just a class name and not a module name so if I use Resque to queue a ruby class called "Worker" then Pyres will throw a "ValueError: Empty module name" exception. To avoid that, if there's no module name in the json then we'll use the class name as a module name. --- pyres/__init__.py | 9 +++++++++ tests/__init__.py | 12 +++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/pyres/__init__.py b/pyres/__init__.py index 53bc6ec..011cd88 100644 --- a/pyres/__init__.py +++ b/pyres/__init__.py @@ -90,6 +90,15 @@ def safe_str_to_class(s): klass = lst[-1] mod_list = lst[:-1] module = ".".join(mod_list) + + # ruby compatibility kludge: resque sends just a class name and + # not a module name so if I use resque to queue a ruby class + # called "Worker" then pyres will throw a "ValueError: Empty + # module name" exception. To avoid that, if there's no module in + # the json then we'll use the classname as a module name. 
+ if not module: + module = klass + mod = my_import(module) if hasattr(mod, klass): return getattr(mod, klass) diff --git a/tests/__init__.py b/tests/__init__.py index d75c866..64f09eb 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -2,6 +2,14 @@ import os from pyres import ResQ, str_to_class +class tests(object): + queue = 'basic' + + @staticmethod + def perform(name): + s = "name:%s" % name + return s + class Basic(object): queue = 'basic' @@ -111,7 +119,9 @@ def test_safe_str_to_class(self): assert safe_str_to_class('tests.Basic') == Basic self.assertRaises(ImportError, safe_str_to_class, 'test.Mine') self.assertRaises(ImportError, safe_str_to_class, 'tests.World') - + # test that we'll use the class name as a module name if no + # module name is provided (for Ruby compatibility) + assert safe_str_to_class('tests') == tests class PyResTests(unittest.TestCase): def setUp(self):