From 8a9839d5779e912ce31f49bea416c9de10ec93d8 Mon Sep 17 00:00:00 2001 From: Danny Guinther Date: Tue, 11 Mar 2014 11:26:48 -0400 Subject: [PATCH 01/83] Add API for handling throttling --- lib/qless.rb | 16 +++++++++++- lib/qless/throttle.rb | 46 +++++++++++++++++++++++++++++++++ spec/unit/throttle_spec.rb | 52 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 113 insertions(+), 1 deletion(-) create mode 100644 lib/qless/throttle.rb create mode 100644 spec/unit/throttle_spec.rb diff --git a/lib/qless.rb b/lib/qless.rb index e4e209b0..5e9e3e62 100644 --- a/lib/qless.rb +++ b/lib/qless.rb @@ -23,6 +23,7 @@ module Qless require 'qless/version' require 'qless/config' require 'qless/queue' +require 'qless/throttle' require 'qless/job' require 'qless/lua_script' require 'qless/failure_formatter' @@ -135,6 +136,18 @@ def [](name) end end + # A class for interacting with throttles. Not meant to be instantiated directly, + # it's accessed through Client#throttles + class ClientThrottles + def initialize(client) + @client = client + end + + def [](name) + Throttle.new(name, @client) + end + end + # A class for interacting with events. Not meant to be instantiated directly, # it's accessed through Client#events class ClientEvents @@ -169,7 +182,7 @@ def stop # The client for interacting with Qless class Client # Lua script - attr_reader :_qless, :config, :redis, :jobs, :queues, :workers + attr_reader :_qless, :config, :redis, :jobs, :queues, :throttles, :workers attr_accessor :worker_name def initialize(options = {}) @@ -183,6 +196,7 @@ def initialize(options = {}) @jobs = ClientJobs.new(self) @queues = ClientQueues.new(self) + @throttles = ClientThrottles.new(self) @workers = ClientWorkers.new(self) @worker_name = [Socket.gethostname, Process.pid.to_s].join('-') end diff --git a/lib/qless/throttle.rb b/lib/qless/throttle.rb new file mode 100644 index 00000000..acde6e7a --- /dev/null +++ b/lib/qless/throttle.rb @@ -0,0 +1,46 @@ +# Encoding: utf-8 + +require 'redis' +require 'json' + +module Qless + class Throttle + attr_reader :name, :client + + def initialize(name, client) + @name = name + @client = client + end + + def delete + @client.call('throttle.delete', @name) + end + + def id + @name + end + + def locks + @client.call('throttle.locks', @name) + end + + def maximum + throttle_attrs['maximum'].to_i + end + + def maximum=(max) + @client.call('throttle.set', @name, max) + end + + def pending + @client.call('throttle.pending', @name) + end + + private + def throttle_attrs + throttle_json = @client.call('throttle.get', @name) + throttle_json ? 
JSON.parse(throttle_json) : {} + end + + end +end diff --git a/spec/unit/throttle_spec.rb b/spec/unit/throttle_spec.rb new file mode 100644 index 00000000..123eda46 --- /dev/null +++ b/spec/unit/throttle_spec.rb @@ -0,0 +1,52 @@ +# Encoding: utf-8 + +require 'spec_helper' +require 'yaml' +require 'qless/queue' + +module Qless + describe Throttle, :integration do + it "stores the correct the name and client at initialization" do + t = Throttle.new('name', client) + t.name.should eq('name') + t.client.should eq(client) + end + + it "can delete the named throttle" do + t = Throttle.new('name', client) + t.maximum = 5 + t.maximum.should eq(5) + t.delete + t.maximum.should eq(0) + end + + it "returns the throttle name when id is called" do + t = Throttle.new('name', client) + t.id.should eq(t.name) + end + + it "returns the set of locked jids" do + t = Throttle.new('name', client) + Redis.current.zadd('ql:t:name-locks', [[1, 1], [1, 2], [1, 3]]) + t.locks.should eq(["1", "2", "3"]) + end + + it "can set and retrieve the throttle's maximum lock count" do + t = Throttle.new('name', client) + t.maximum = 5 + t.maximum.should eq(5) + end + + it "returns the set of pending jids" do + t = Throttle.new('name', client) + Redis.current.zadd('ql:t:name-pending', [[1, 1], [1, 2], [1, 3]]) + t.pending.should eq(["1", "2", "3"]) + end + + it "handles throttle names as a String or Symbol" do + t = Throttle.new('name', client) + t.maximum = 5 + t.id.should eq(t.name) + end + end +end From 51fbae68e252ddc2310ff4062aa005924b7da0f1 Mon Sep 17 00:00:00 2001 From: Matt Conway Date: Tue, 11 Mar 2014 11:37:07 -0400 Subject: [PATCH 02/83] initial lua code for job concurrency throttling --- lib/qless/lua/qless-lib.lua | 348 ++++++++++++++++++++++++++++-------- lib/qless/lua/qless.lua | 306 +++++++++++++++++++++++++------ 2 files changed, 526 insertions(+), 128 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua index 23955ca1..bd97d8d2 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: f7ef735105ade320fef8f621bf264851f246924a +-- Current SHA: 4df412313097935d1d36bc0adc04ae09168cc53c -- This is a generated file ------------------------------------------------------------------------------- -- Forward declarations to make everything happy @@ -25,6 +25,12 @@ local QlessJob = { } QlessJob.__index = QlessJob +-- throttle forward declaration +local QlessThrottle = { + ns = Qless.ns .. 't:' +} +QlessThrottle.__index = QlessThrottle + -- RecurringJob forward declaration local QlessRecurringJob = {} QlessRecurringJob.__index = QlessRecurringJob @@ -63,19 +69,74 @@ function Qless.recurring(jid) return job end +-- Return a throttle object +-- throttle objects are used for arbitrary throttling of jobs. +function Qless.throttle(tid) + assert(tid, 'Throttle(): no tid provided') + local throttle = QlessThrottle.data({id = tid}) + if not throttle then + throttle = { + id = tid, + maximum = 0 + } + end + setmetatable(throttle, QlessThrottle) + + -- set of jids which have acquired a lock on this throttle. + throttle.locks = { + count = function() + return (redis.call('zcard', QlessThrottle.ns .. tid .. '-locks') or 0) + end, members = function() + return redis.call('zrange', QlessThrottle.ns .. tid .. '-locks', 0, -1) + end, add = function(...) + if #arg > 0 then + redis.call('zadd', QlessThrottle.ns .. tid .. '-locks', unpack(arg)) + end + end, remove = function(...) + if #arg > 0 then + return redis.call('zrem', QlessThrottle.ns .. tid .. 
'-locks', unpack(arg)) + end + end, pop = function(min, max) + return redis.call('zremrangebyrank', QlessThrottle.ns .. tid .. '-locks', min, max) + end + } + + -- set of jids waiting on this throttle to become available. + throttle.pending = { + count = function() + return (redis.call('zcard', QlessThrottle.ns .. tid .. '-pending') or 0) + end, members = function() + return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', 0, -1) + end, peek = function(min, max) + return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', min, max) + end, add = function(...) + if #arg > 0 then + redis.call('zadd', QlessThrottle.ns .. tid .. '-pending', unpack(arg)) + end + end, remove = function(...) + if #arg > 0 then + return redis.call('zrem', QlessThrottle.ns .. tid .. '-pending', unpack(arg)) + end + end, pop = function(min, max) + return redis.call('zremrangebyrank', QlessThrottle.ns .. tid .. '-pending', min, max) + end + } + return throttle +end + -- Failed([group, [start, [limit]]]) -- ------------------------------------ -- If no group is provided, this returns a JSON blob of the counts of the -- various groups of failures known. If a group is provided, it will report up -- to `limit` from `start` of the jobs affected by that issue. --- +-- -- # If no group, then... -- { -- 'group1': 1, -- 'group2': 5, -- ... -- } --- +-- -- # If a group is provided, then... -- { -- 'total': 20, @@ -121,9 +182,9 @@ end ------------------------------------------------------------------------------- -- Return all the job ids currently considered to be in the provided state -- in a particular queue. The response is a list of job ids: --- +-- -- [ --- jid1, +-- jid1, -- jid2, -- ... -- ] @@ -169,7 +230,7 @@ end -- associated with that id, and 'untrack' stops tracking it. In this context, -- tracking is nothing more than saving the job to a list of jobs that are -- considered special. --- +-- -- { -- 'jobs': [ -- { @@ -191,6 +252,7 @@ function Qless.track(now, command, jid) assert(jid, 'Track(): Arg "jid" missing') -- Verify that job exists assert(Qless.job(jid):exists(), 'Track(): Job does not exist') + redis.call('set', 'print_line_track_command', now .. command .. jid) if string.lower(command) == 'track' then Qless.publish('track', jid) return redis.call('zadd', 'ql:tracked', now, jid) @@ -254,7 +316,7 @@ function Qless.tag(now, command, ...) tags = cjson.decode(tags) local _tags = {} for i,v in ipairs(tags) do _tags[v] = true end - + -- Otherwise, add the job to the sorted set with that tags for i=2,#arg do local tag = arg[i] @@ -265,7 +327,7 @@ function Qless.tag(now, command, ...) redis.call('zadd', 'ql:t:' .. tag, now, jid) redis.call('zincrby', 'ql:tags', 1, tag) end - + tags = cjson.encode(tags) redis.call('hset', QlessJob.ns .. jid, 'tags', tags) return tags @@ -281,7 +343,7 @@ function Qless.tag(now, command, ...) tags = cjson.decode(tags) local _tags = {} for i,v in ipairs(tags) do _tags[v] = true end - + -- Otherwise, add the job to the sorted set with that tags for i=2,#arg do local tag = arg[i] @@ -289,10 +351,10 @@ function Qless.tag(now, command, ...) redis.call('zrem', 'ql:t:' .. tag, jid) redis.call('zincrby', 'ql:tags', -1, tag) end - + local results = {} for i,tag in ipairs(tags) do if _tags[tag] then table.insert(results, tag) end end - + tags = cjson.encode(results) redis.call('hset', QlessJob.ns .. jid, 'tags', tags) return results @@ -345,6 +407,7 @@ function Qless.cancel(...) -- If we've made it this far, then we are good to go. 
We can now just -- remove any trace of all these jobs, as they form a dependent clique for _, jid in ipairs(arg) do + local namespaced_jid = QlessJob.ns .. jid -- Find any stage it's associated with and remove its from that stage local state, queue, failure, worker = unpack(redis.call( 'hmget', QlessJob.ns .. jid, 'state', 'queue', 'failure', 'worker')) @@ -375,6 +438,8 @@ function Qless.cancel(...) queue.depends.remove(jid) end + Qless.job(namespaced_jid):release_throttle() + -- We should probably go through all our dependencies and remove -- ourselves from the list of dependents for i, j in ipairs(redis.call( @@ -420,7 +485,7 @@ function Qless.cancel(...) redis.call('del', QlessJob.ns .. jid .. '-history') end end - + return arg end @@ -492,7 +557,7 @@ function QlessJob:data(...) local job = redis.call( 'hmget', QlessJob.ns .. self.jid, 'jid', 'klass', 'state', 'queue', 'worker', 'priority', 'expires', 'retries', 'remaining', 'data', - 'tags', 'failure') + 'tags', 'failure', 'throttle') -- Return nil if we haven't found it if not job[1] then @@ -515,6 +580,7 @@ function QlessJob:data(...) tags = cjson.decode(job[11]), history = self:history(), failure = cjson.decode(job[12] or '{}'), + throttle = job[13] or nil, dependents = redis.call( 'smembers', QlessJob.ns .. self.jid .. '-dependents'), dependencies = redis.call( @@ -536,11 +602,11 @@ end -- Complete a job and optionally put it in another queue, either scheduled or -- to be considered waiting immediately. It can also optionally accept other --- jids on which this job will be considered dependent before it's considered +-- jids on which this job will be considered dependent before it's considered -- valid. -- -- The variable-length arguments may be pairs of the form: --- +-- -- ('next' , queue) : The queue to advance it to next -- ('delay' , delay) : The delay for the next queue -- ('depends', : Json of jobs it depends on in the new queue @@ -555,7 +621,7 @@ function QlessJob:complete(now, worker, queue, data, ...) -- Read in all the optional parameters local options = {} for i = 1, #arg, 2 do options[arg[i]] = arg[i + 1] end - + -- Sanity check on optional args local nextq = options['next'] local delay = assert(tonumber(options['delay'] or 0)) @@ -577,9 +643,9 @@ function QlessJob:complete(now, worker, queue, data, ...) local bin = now - (now % 86400) -- First things first, we should see if the worker still owns this job - local lastworker, state, priority, retries = unpack( + local lastworker, state, priority, retries, current_queue = unpack( redis.call('hmget', QlessJob.ns .. self.jid, 'worker', 'state', - 'priority', 'retries', 'dependents')) + 'priority', 'retries', 'queue')) if lastworker == false then error('Complete(): Job does not exist') @@ -588,6 +654,9 @@ function QlessJob:complete(now, worker, queue, data, ...) elseif lastworker ~= worker then error('Complete(): Job has been handed out to another worker: ' .. tostring(lastworker)) + elseif queue ~= current_queue then + error('Complete(): Job running in another queue: ' .. + tostring(current_queue)) end -- Now we can assume that the worker does own the job. We need to @@ -608,6 +677,8 @@ function QlessJob:complete(now, worker, queue, data, ...) 
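-- (Illustrative sketch, not part of the generated diff above: it restates the
-- release-on-complete flow these changes introduce, using the QlessThrottle
-- helpers defined later in this patch series. The helper name
-- `release_and_promote` is hypothetical.)
local function release_and_promote(now, throttle, jid)
  -- Drop this jid's lock on the throttle.
  throttle.locks.remove(jid)
  -- Promote the next pending jid, if any, back onto its queue's work set.
  local next_jid = unpack(throttle:pending_pop(0, 0) or {})
  if next_jid then
    local job = Qless.job(next_jid):data()
    local queue_obj = Qless.queue(job.queue)
    queue_obj.throttled.remove(job.jid)
    queue_obj.work.add(now, job.priority, job.jid)
  end
end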
queue_obj.locks.remove(self.jid) queue_obj.scheduled.remove(self.jid) + self:release_throttle(now) + ---------------------------------------------------------- -- This is the massive stats update that we have to do ---------------------------------------------------------- @@ -645,7 +716,7 @@ function QlessJob:complete(now, worker, queue, data, ...) if redis.call('zscore', 'ql:queues', nextq) == false then redis.call('zadd', 'ql:queues', now, nextq) end - + redis.call('hmset', QlessJob.ns .. self.jid, 'state', 'waiting', 'worker', '', @@ -653,7 +724,7 @@ function QlessJob:complete(now, worker, queue, data, ...) 'queue', nextq, 'expires', 0, 'remaining', tonumber(retries)) - + if (delay > 0) and (#depends == 0) then queue_obj.scheduled.add(now + delay, self.jid) return 'scheduled' @@ -701,18 +772,18 @@ function QlessJob:complete(now, worker, queue, data, ...) 'queue', '', 'expires', 0, 'remaining', tonumber(retries)) - + -- Do the completion dance local count = Qless.config.get('jobs-history-count') local time = Qless.config.get('jobs-history') - + -- These are the default values count = tonumber(count or 50000) time = tonumber(time or 7 * 24 * 60 * 60) - + -- Schedule this job for destructination eventually redis.call('zadd', 'ql:completed', now, self.jid) - + -- Now look at the expired job data. First, based on the current time local jids = redis.call('zrangebyscore', 'ql:completed', 0, now - time) -- Any jobs that need to be expired... delete @@ -728,7 +799,7 @@ function QlessJob:complete(now, worker, queue, data, ...) end -- And now remove those from the queued-for-cleanup queue redis.call('zremrangebyscore', 'ql:completed', 0, now - time) - + -- Now take the all by the most recent 'count' ids jids = redis.call('zrange', 'ql:completed', 0, (-1-count)) for index, jid in ipairs(jids) do @@ -742,7 +813,7 @@ function QlessJob:complete(now, worker, queue, data, ...) redis.call('del', QlessJob.ns .. jid .. '-history') end redis.call('zremrangebyrank', 'ql:completed', 0, (-1-count)) - + -- Alright, if this has any dependents, then we should go ahead -- and unstick those guys. for i, j in ipairs(redis.call( @@ -766,10 +837,10 @@ function QlessJob:complete(now, worker, queue, data, ...) end end end - + -- Delete our dependents key redis.call('del', QlessJob.ns .. self.jid .. '-dependents') - + return 'complete' end end @@ -780,14 +851,14 @@ end -- specific message. By `group`, we mean some phrase that might be one of -- several categorical modes of failure. The `message` is something more -- job-specific, like perhaps a traceback. --- +-- -- This method should __not__ be used to note that a job has been dropped or -- has failed in a transient way. This method __should__ be used to note that -- a job has something really wrong with it that must be remedied. --- +-- -- The motivation behind the `group` is so that similar errors can be grouped -- together. Optionally, updated data can be provided for the job. A job in --- any state can be marked as failed. If it has been given to a worker as a +-- any state can be marked as failed. If it has been given to a worker as a -- job, then its subsequent requests to heartbeat or complete that job will -- fail. Failed jobs are kept until they are canceled or completed. 
-- @@ -858,7 +929,7 @@ function QlessJob:fail(now, worker, group, message, data) queue_obj.locks.remove(self.jid) queue_obj.scheduled.remove(self.jid) - -- The reason that this appears here is that the above will fail if the + -- The reason that this appears here is that the above will fail if the -- job doesn't exist if data then redis.call('hset', QlessJob.ns .. self.jid, 'data', cjson.encode(data)) @@ -875,6 +946,8 @@ function QlessJob:fail(now, worker, group, message, data) ['worker'] = worker })) + self:release_throttle(now) + -- Add this group of failure to the list of failures redis.call('sadd', 'ql:failures', group) -- And add this particular instance to the failed groups @@ -895,7 +968,7 @@ end -- Throws an exception if: -- - the worker is not the worker with a lock on the job -- - the job is not actually running --- +-- -- Otherwise, it returns the number of retries remaining. If the allowed -- retries have been exhausted, then it is automatically failed, and a negative -- number is returned. @@ -908,7 +981,7 @@ function QlessJob:retry(now, queue, worker, delay, group, message) assert(worker, 'Retry(): Arg "worker" missing') delay = assert(tonumber(delay or 0), 'Retry(): Arg "delay" not a number: ' .. tostring(delay)) - + -- Let's see what the old priority, and tags were local oldqueue, state, retries, oldworker, priority, failure = unpack( redis.call('hmget', QlessJob.ns .. self.jid, 'queue', 'state', @@ -933,6 +1006,10 @@ function QlessJob:retry(now, queue, worker, delay, group, message) -- Remove it from the locks key of the old queue Qless.queue(oldqueue).locks.remove(self.jid) + -- Release the throttle for the job + self:release_throttle(now) + self:acquire_throttle() + -- Remove this job from the worker that was previously working it redis.call('zrem', 'ql:w:' .. worker .. ':jobs', self.jid) @@ -941,7 +1018,7 @@ function QlessJob:retry(now, queue, worker, delay, group, message) -- queue it's in local group = group or 'failed-retries-' .. queue self:history(now, 'failed', {['group'] = group}) - + redis.call('hmset', QlessJob.ns .. self.jid, 'state', 'failed', 'worker', '', 'expires', '') @@ -965,7 +1042,7 @@ function QlessJob:retry(now, queue, worker, delay, group, message) ['worker'] = unpack(self:data('worker')) })) end - + -- Add this type of failure to the list of failures redis.call('sadd', 'ql:failures', group) -- And add this particular instance to the failed types @@ -1117,11 +1194,11 @@ function QlessJob:heartbeat(now, worker, data) redis.call('hmset', QlessJob.ns .. self.jid, 'expires', expires, 'worker', worker) end - + -- Update hwen this job was last updated on that worker -- Add this job to the list of jobs handled by this worker redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, self.jid) - + -- And now we should just update the locks local queue = Qless.queue( redis.call('hget', QlessJob.ns .. self.jid, 'queue')) @@ -1267,6 +1344,21 @@ function QlessJob:history(now, what, item) cjson.encode({math.floor(now), what, item})) end end + +function QlessJob:release_throttle(now) + local tid = redis.call('hget', QlessJob.ns .. self.jid, 'throttle') + if tid then + Qless.throttle(tid):release(now, self.jid) + end +end + +function QlessJob:acquire_throttle() + local tid = unpack(redis.call('hmget', QlessJob.ns .. 
self.jid, 'throttle')) + if tid then + return Qless.throttle(tid):acquire(self.jid) + end + return true +end ------------------------------------------------------------------------------- -- Queue class ------------------------------------------------------------------------------- @@ -1346,6 +1438,21 @@ function Qless.queue(name) end } + -- Access to our throttled jobs + queue.throttled = { + peek = function(now, offset, count) + return redis.call('zrange', queue:prefix('throttled'), offset, offset + count - 1) + end, add = function(now, jid) + redis.call('zadd', queue:prefix('throttled'), jid) + end, remove = function(...) + if #arg > 0 then + return redis.call('zrem', queue:prefix('throttled'), unpack(arg)) + end + end, length = function() + return redis.call('zcard', queue:prefix('throttled')) + end + } + -- Access to our scheduled jobs queue.scheduled = { peek = function(now, offset, count) @@ -1451,11 +1558,11 @@ function QlessQueue:stats(now, date) local key = 'ql:s:' .. name .. ':' .. bin .. ':' .. queue local count, mean, vk = unpack(redis.call('hmget', key, 'total', 'mean', 'vk')) - + count = tonumber(count) or 0 mean = tonumber(mean) or 0 vk = tonumber(vk) - + results.count = count or 0 results.mean = mean or 0 results.histogram = {} @@ -1505,8 +1612,8 @@ function QlessQueue:peek(now, count) -- Now we've checked __all__ the locks for this queue the could -- have expired, and are no more than the number requested. If - -- we still need values in order to meet the demand, then we - -- should check if any scheduled items, and if so, we should + -- we still need values in order to meet the demand, then we + -- should check if any scheduled items, and if so, we should -- insert them to ensure correctness when pulling off the next -- unit of work. self:check_scheduled(now, count - #jids) @@ -1580,8 +1687,8 @@ function QlessQueue:pop(now, worker, count) -- look for all the recurring jobs that need jobs run self:check_recurring(now, count - #jids) - -- If we still need values in order to meet the demand, then we - -- should check if any scheduled items, and if so, we should + -- If we still need values in order to meet the demand, then we + -- should check if any scheduled items, and if so, we should -- insert them to ensure correctness when pulling off the next -- unit of work. self:check_scheduled(now, count - #jids) @@ -1603,19 +1710,19 @@ function QlessQueue:pop(now, worker, count) self:stat(now, 'wait', waiting) redis.call('hset', QlessJob.ns .. jid, 'time', string.format("%.20f", now)) - + -- Add this job to the list of jobs handled by this worker redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, jid) - + -- Update the jobs data, and add its locks, and return the job job:update({ worker = worker, expires = expires, state = 'running' }) - + self.locks.add(expires, jid) - + local tracked = redis.call('zscore', 'ql:tracked', jid) ~= false if tracked then Qless.publish('popped', jid) @@ -1666,7 +1773,7 @@ function QlessQueue:stat(now, stat, val) redis.call('hincrby', key, 'h' .. math.floor(val / 3600), 1) else -- days redis.call('hincrby', key, 'd' .. math.floor(val / 86400), 1) - end + end redis.call('hmset', key, 'total', count, 'mean', mean, 'vk', vk) end @@ -1706,14 +1813,15 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) end -- Sanity check on optional args - retries = assert(tonumber(options['retries'] or retries or 5) , + local retries = assert(tonumber(options['retries'] or retries or 5) , 'Put(): Arg "retries" not a number: ' .. 
tostring(options['retries'])) - tags = assert(cjson.decode(options['tags'] or tags or '[]' ), + local tags = assert(cjson.decode(options['tags'] or tags or '[]' ), 'Put(): Arg "tags" not JSON' .. tostring(options['tags'])) - priority = assert(tonumber(options['priority'] or priority or 0), + local priority = assert(tonumber(options['priority'] or priority or 0), 'Put(): Arg "priority" not a number' .. tostring(options['priority'])) local depends = assert(cjson.decode(options['depends'] or '[]') , 'Put(): Arg "depends" not JSON: ' .. tostring(options['depends'])) + -- local throttle = options['throttle'] -- If the job has old dependencies, determine which dependencies are -- in the new dependencies but not in the old ones, and which are in the @@ -1726,7 +1834,7 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) -- Now find what's in the original, but not the new local original = redis.call( 'smembers', QlessJob.ns .. jid .. '-dependencies') - for _, dep in pairs(original) do + for _, dep in pairs(original) do if new[dep] == nil then -- Remove k as a dependency redis.call('srem', QlessJob.ns .. dep .. '-dependents' , jid) @@ -1800,8 +1908,7 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. self.name, 'failed' , -1) end - -- First, let's save its data - redis.call('hmset', QlessJob.ns .. jid, + data = { 'jid' , jid, 'klass' , klass, 'data' , raw_data, @@ -1813,7 +1920,17 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) 'queue' , self.name, 'retries' , retries, 'remaining', retries, - 'time' , string.format("%.20f", now)) + 'time' , string.format("%.20f", now) + } + + -- Insert the throttle resource into the array if it exists. + if options['throttle'] then + table.insert(data, 'throttle') + table.insert(data, options['throttle']) + end + + -- First, let's save its data + redis.call('hmset', QlessJob.ns .. jid, unpack(data)) -- These are the jids we legitimately have to wait on for i, j in ipairs(depends) do @@ -1843,13 +1960,15 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) if redis.call('scard', QlessJob.ns .. jid .. '-dependencies') > 0 then self.depends.add(now, jid) redis.call('hset', QlessJob.ns .. jid, 'state', 'depends') - else + elseif job:acquire_throttle() then self.work.add(now, priority, jid) + else + self.throttled.add(jid) end end -- Lastly, we're going to make sure that this item is in the - -- set of known queues. We should keep this sorted by the + -- set of known queues. We should keep this sorted by the -- order in which we saw each of these queues if redis.call('zscore', 'ql:queues', self.name) == false then redis.call('zadd', 'ql:queues', now, self.name) @@ -1919,7 +2038,7 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...) if #arg % 2 == 1 then error('Odd number of additional args: ' .. tostring(arg)) end - + -- Read in all the optional parameters local options = {} for i = 3, #arg, 2 do options[arg[i]] = arg[i + 1] end @@ -1939,12 +2058,12 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...) local count, old_queue = unpack(redis.call('hmget', 'ql:r:' .. jid, 'count', 'queue')) count = count or 0 - -- If it has previously been in another queue, then we should remove + -- If it has previously been in another queue, then we should remove -- some information about it if old_queue then Qless.queue(old_queue).recurring.remove(jid) end - + -- Do some insertions redis.call('hmset', 'ql:r:' .. 
jid, 'jid' , jid, @@ -1962,14 +2081,14 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...) 'backlog' , options.backlog) -- Now, we should schedule the next run of the job self.recurring.add(now + offset, jid) - + -- Lastly, we're going to make sure that this item is in the - -- set of known queues. We should keep this sorted by the + -- set of known queues. We should keep this sorted by the -- order in which we saw each of these queues if redis.call('zscore', 'ql:queues', self.name) == false then redis.call('zadd', 'ql:queues', now, self.name) end - + return jid else error('Recur(): schedule type "' .. tostring(spec) .. '" unknown') @@ -2015,20 +2134,20 @@ function QlessQueue:check_recurring(now, count) ) end end - - -- We're saving this value so that in the history, we can accurately + + -- We're saving this value so that in the history, we can accurately -- reflect when the job would normally have been scheduled while (score <= now) and (moved < count) do local count = redis.call('hincrby', 'ql:r:' .. jid, 'count', 1) moved = moved + 1 - + -- Add this job to the list of jobs tagged with whatever tags were -- supplied for i, tag in ipairs(_tags) do redis.call('zadd', 'ql:t:' .. tag, now, jid .. '-' .. count) redis.call('zincrby', 'ql:tags', 1, tag) end - + -- First, let's save its data local child_jid = jid .. '-' .. count redis.call('hmset', QlessJob.ns .. child_jid, @@ -2045,12 +2164,12 @@ function QlessQueue:check_recurring(now, count) 'remaining', retries, 'time' , string.format("%.20f", score)) Qless.job(child_jid):history(score, 'put', {q = self.name}) - + -- Now, if a delay was provided, and if it's in the future, -- then we'll have to schedule it. Otherwise, we're just -- going to add it to the work queue. self.work.add(score, priority, jid .. '-' .. count) - + score = score + interval self.recurring.add(score, jid) end @@ -2065,7 +2184,7 @@ function QlessQueue:check_scheduled(now, count) -- insert into the work queue local scheduled = self.scheduled.ready(now, 0, count) for index, jid in ipairs(scheduled) do - -- With these in hand, we'll have to go out and find the + -- With these in hand, we'll have to go out and find the -- priorities of these jobs, and then we'll insert them -- into the work queue and then when that's complete, we'll -- remove them from the scheduled queue @@ -2150,7 +2269,7 @@ function QlessQueue:invalidate_locks(now, count) -- See how many remaining retries the job has local remaining = tonumber(redis.call( 'hincrby', QlessJob.ns .. jid, 'remaining', -1)) - + -- This is where we actually have to time out the work if remaining < 0 then -- Now remove the instance from the schedule, and work queues @@ -2158,7 +2277,7 @@ function QlessQueue:invalidate_locks(now, count) self.work.remove(jid) self.locks.remove(jid) self.scheduled.remove(jid) - + local group = 'failed-retries-' .. Qless.job(jid):data()['queue'] local job = Qless.job(jid) job:history(now, 'failed', {group = group}) @@ -2174,12 +2293,12 @@ function QlessQueue:invalidate_locks(now, count) ['when'] = now, ['worker'] = unpack(job:data('worker')) })) - + -- Add this type of failure to the list of failures redis.call('sadd', 'ql:failures', group) -- And add this particular instance to the failed types redis.call('lpush', 'ql:f:' .. 
group, jid) - + if redis.call('zscore', 'ql:tracked', jid) ~= false then Qless.publish('failed', jid) end @@ -2457,3 +2576,82 @@ function QlessWorker.counts(now, worker) return response end end +-- Retrieve the data fro a throttled resource +function QlessThrottle:data() + local throttle = redis.call('hmget', QlessThrottle.ns .. self.id, 'id', 'maximum') + -- Return nil if we haven't found it + if not throttle[1] then + return nil + end + + local data = { + id = throttle[1], + maximum = tonumber(throttle[2]) + } + return data +end + +-- Set the data for a throttled resource +function QlessThrottle:set(data) + redis.call('hmset', QlessThrottle.ns .. self.id, 'id', self.id, 'maximum', data.maximum) +end + +-- Delete a throttled resource +function QlessThrottle:unset() + redis.call('del', QlessThrottle.ns .. self.id) +end + +-- Acquire a throttled resource for a job. +-- if the resource is at full capacity then add it to the pending +-- set. +-- Returns true of the job acquired the resource. +function QlessThrottle:acquire(jid) + if self:available() then + redis.call('set', 'printline', jid .. ' is acquiring the lock for ' .. self.id) + self.locks.add(1, jid) + return true + else + redis.call('set', 'printline', jid .. ' failed acquiring the lock for ' .. self.id .. ' marked as pending') + self.pending.add(1, jid) + return false + end +end + +-- Release a throttled resource. +-- This will take a currently pending job +-- and attempt to acquire a lock. +-- If it succeeds at acquiring a lock then +-- the job will be moved from the throttled +-- queue into the work queue +function QlessThrottle:release(now, jid) + redis.call('set', 'printline', jid .. ' is releasing lock on ' .. self.id) + self.locks.remove(jid) + redis.call('set', 'printline', 'retrieving next job from pending on ' .. self.id) + local next_jid = unpack(self:pending_pop(0, 0)) + if next_jid and self:acquire(next_jid) then + local job = Qless.job(next_jid):data() + local queue_obj = Qless.queue(job.queue) + queue_obj.throttled.remove(job.jid) + queue_obj.work.add(now, job.priority, job.jid) + end +end + +function QlessThrottle:lock_pop(min, max) + local lock = Qless.throttle(self.id).locks + local jid = lock.peek(min,max) + lock.pop(min,max) + return jid +end + +function QlessThrottle:pending_pop(min, max) + local pending = Qless.throttle(self.id).pending + local jids = pending.peek(min,max) + pending.pop(min,max) + return jids +end + +-- Returns true if the throttle has locks available, false otherwise. +function QlessThrottle:available() + redis.call('set', 'printline', 'available ' .. self.maximum .. ' == 0 or ' .. self.locks.count() .. ' < self.maximum') + return self.maximum == 0 or self.locks.count() < self.maximum +end diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua index 7e853191..9fc80362 100644 --- a/lib/qless/lua/qless.lua +++ b/lib/qless/lua/qless.lua @@ -1,4 +1,4 @@ --- Current SHA: f7ef735105ade320fef8f621bf264851f246924a +-- Current SHA: 4df412313097935d1d36bc0adc04ae09168cc53c -- This is a generated file local Qless = { ns = 'ql:' @@ -19,6 +19,11 @@ local QlessJob = { } QlessJob.__index = QlessJob +local QlessThrottle = { + ns = Qless.ns .. 
't:' +} +QlessThrottle.__index = QlessThrottle + local QlessRecurringJob = {} QlessRecurringJob.__index = QlessRecurringJob @@ -50,6 +55,57 @@ function Qless.recurring(jid) return job end +function Qless.throttle(tid) + assert(tid, 'Throttle(): no tid provided') + local throttle = QlessThrottle.data({id = tid}) + if not throttle then + throttle = { + id = tid, + maximum = 0 + } + end + setmetatable(throttle, QlessThrottle) + + throttle.locks = { + count = function() + return (redis.call('zcard', QlessThrottle.ns .. tid .. '-locks') or 0) + end, members = function() + return redis.call('zrange', QlessThrottle.ns .. tid .. '-locks', 0, -1) + end, add = function(...) + if #arg > 0 then + redis.call('zadd', QlessThrottle.ns .. tid .. '-locks', unpack(arg)) + end + end, remove = function(...) + if #arg > 0 then + return redis.call('zrem', QlessThrottle.ns .. tid .. '-locks', unpack(arg)) + end + end, pop = function(min, max) + return redis.call('zremrangebyrank', QlessThrottle.ns .. tid .. '-locks', min, max) + end + } + + throttle.pending = { + count = function() + return (redis.call('zcard', QlessThrottle.ns .. tid .. '-pending') or 0) + end, members = function() + return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', 0, -1) + end, peek = function(min, max) + return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', min, max) + end, add = function(...) + if #arg > 0 then + redis.call('zadd', QlessThrottle.ns .. tid .. '-pending', unpack(arg)) + end + end, remove = function(...) + if #arg > 0 then + return redis.call('zrem', QlessThrottle.ns .. tid .. '-pending', unpack(arg)) + end + end, pop = function(min, max) + return redis.call('zremrangebyrank', QlessThrottle.ns .. tid .. '-pending', min, max) + end + } + return throttle +end + function Qless.failed(group, start, limit) start = assert(tonumber(start or 0), 'Failed(): Arg "start" is not a number: ' .. (start or 'nil')) @@ -109,6 +165,7 @@ function Qless.track(now, command, jid) if command ~= nil then assert(jid, 'Track(): Arg "jid" missing') assert(Qless.job(jid):exists(), 'Track(): Job does not exist') + redis.call('set', 'print_line_track_command', now .. command .. jid) if string.lower(command) == 'track' then Qless.publish('track', jid) return redis.call('zadd', 'ql:tracked', now, jid) @@ -147,7 +204,7 @@ function Qless.tag(now, command, ...) tags = cjson.decode(tags) local _tags = {} for i,v in ipairs(tags) do _tags[v] = true end - + for i=2,#arg do local tag = arg[i] if _tags[tag] == nil then @@ -157,7 +214,7 @@ function Qless.tag(now, command, ...) redis.call('zadd', 'ql:t:' .. tag, now, jid) redis.call('zincrby', 'ql:tags', 1, tag) end - + tags = cjson.encode(tags) redis.call('hset', QlessJob.ns .. jid, 'tags', tags) return tags @@ -171,17 +228,17 @@ function Qless.tag(now, command, ...) tags = cjson.decode(tags) local _tags = {} for i,v in ipairs(tags) do _tags[v] = true end - + for i=2,#arg do local tag = arg[i] _tags[tag] = nil redis.call('zrem', 'ql:t:' .. tag, jid) redis.call('zincrby', 'ql:tags', -1, tag) end - + local results = {} for i,tag in ipairs(tags) do if _tags[tag] then table.insert(results, tag) end end - + tags = cjson.encode(results) redis.call('hset', QlessJob.ns .. jid, 'tags', tags) return results @@ -224,6 +281,7 @@ function Qless.cancel(...) end for _, jid in ipairs(arg) do + local namespaced_jid = QlessJob.ns .. jid local state, queue, failure, worker = unpack(redis.call( 'hmget', QlessJob.ns .. jid, 'state', 'queue', 'failure', 'worker')) @@ -249,6 +307,8 @@ function Qless.cancel(...) 
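-- (Illustrative sketch, not part of the diff: cancellation releases a job's
-- throttle by reading the optional 'throttle' field stored on the job hash,
-- mirroring QlessJob:release_throttle defined elsewhere in this patch. The
-- helper name `release_job_throttle` is hypothetical.)
local function release_job_throttle(now, jid)
  local tid = redis.call('hget', QlessJob.ns .. jid, 'throttle')
  if tid then
    -- Free the lock and let a pending jid take its place.
    Qless.throttle(tid):release(now, jid)
  end
end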
queue.depends.remove(jid) end + Qless.job(namespaced_jid):release_throttle() + for i, j in ipairs(redis.call( 'smembers', QlessJob.ns .. jid .. '-dependencies')) do redis.call('srem', QlessJob.ns .. j .. '-dependents', jid) @@ -284,7 +344,7 @@ function Qless.cancel(...) redis.call('del', QlessJob.ns .. jid .. '-history') end end - + return arg end @@ -338,7 +398,7 @@ function QlessJob:data(...) local job = redis.call( 'hmget', QlessJob.ns .. self.jid, 'jid', 'klass', 'state', 'queue', 'worker', 'priority', 'expires', 'retries', 'remaining', 'data', - 'tags', 'failure') + 'tags', 'failure', 'throttle') if not job[1] then return nil @@ -360,6 +420,7 @@ function QlessJob:data(...) tags = cjson.decode(job[11]), history = self:history(), failure = cjson.decode(job[12] or '{}'), + throttle = job[13] or nil, dependents = redis.call( 'smembers', QlessJob.ns .. self.jid .. '-dependents'), dependencies = redis.call( @@ -385,7 +446,7 @@ function QlessJob:complete(now, worker, queue, data, ...) local options = {} for i = 1, #arg, 2 do options[arg[i]] = arg[i + 1] end - + local nextq = options['next'] local delay = assert(tonumber(options['delay'] or 0)) local depends = assert(cjson.decode(options['depends'] or '[]'), @@ -401,9 +462,9 @@ function QlessJob:complete(now, worker, queue, data, ...) local bin = now - (now % 86400) - local lastworker, state, priority, retries = unpack( + local lastworker, state, priority, retries, current_queue = unpack( redis.call('hmget', QlessJob.ns .. self.jid, 'worker', 'state', - 'priority', 'retries', 'dependents')) + 'priority', 'retries', 'queue')) if lastworker == false then error('Complete(): Job does not exist') @@ -412,6 +473,9 @@ function QlessJob:complete(now, worker, queue, data, ...) elseif lastworker ~= worker then error('Complete(): Job has been handed out to another worker: ' .. tostring(lastworker)) + elseif queue ~= current_queue then + error('Complete(): Job running in another queue: ' .. + tostring(current_queue)) end self:history(now, 'done') @@ -425,6 +489,8 @@ function QlessJob:complete(now, worker, queue, data, ...) queue_obj.locks.remove(self.jid) queue_obj.scheduled.remove(self.jid) + self:release_throttle(now) + local time = tonumber( redis.call('hget', QlessJob.ns .. self.jid, 'time') or now) local waiting = now - time @@ -452,7 +518,7 @@ function QlessJob:complete(now, worker, queue, data, ...) if redis.call('zscore', 'ql:queues', nextq) == false then redis.call('zadd', 'ql:queues', now, nextq) end - + redis.call('hmset', QlessJob.ns .. self.jid, 'state', 'waiting', 'worker', '', @@ -460,7 +526,7 @@ function QlessJob:complete(now, worker, queue, data, ...) 'queue', nextq, 'expires', 0, 'remaining', tonumber(retries)) - + if (delay > 0) and (#depends == 0) then queue_obj.scheduled.add(now + delay, self.jid) return 'scheduled' @@ -503,15 +569,15 @@ function QlessJob:complete(now, worker, queue, data, ...) 'queue', '', 'expires', 0, 'remaining', tonumber(retries)) - + local count = Qless.config.get('jobs-history-count') local time = Qless.config.get('jobs-history') - + count = tonumber(count or 50000) time = tonumber(time or 7 * 24 * 60 * 60) - + redis.call('zadd', 'ql:completed', now, self.jid) - + local jids = redis.call('zrangebyscore', 'ql:completed', 0, now - time) for index, jid in ipairs(jids) do local tags = cjson.decode( @@ -524,7 +590,7 @@ function QlessJob:complete(now, worker, queue, data, ...) redis.call('del', QlessJob.ns .. jid .. 
'-history') end redis.call('zremrangebyscore', 'ql:completed', 0, now - time) - + jids = redis.call('zrange', 'ql:completed', 0, (-1-count)) for index, jid in ipairs(jids) do local tags = cjson.decode( @@ -537,7 +603,7 @@ function QlessJob:complete(now, worker, queue, data, ...) redis.call('del', QlessJob.ns .. jid .. '-history') end redis.call('zremrangebyrank', 'ql:completed', 0, (-1-count)) - + for i, j in ipairs(redis.call( 'smembers', QlessJob.ns .. self.jid .. '-dependents')) do redis.call('srem', QlessJob.ns .. j .. '-dependencies', self.jid) @@ -559,9 +625,9 @@ function QlessJob:complete(now, worker, queue, data, ...) end end end - + redis.call('del', QlessJob.ns .. self.jid .. '-dependents') - + return 'complete' end end @@ -627,6 +693,8 @@ function QlessJob:fail(now, worker, group, message, data) ['worker'] = worker })) + self:release_throttle(now) + redis.call('sadd', 'ql:failures', group) redis.call('lpush', 'ql:f:' .. group, self.jid) @@ -639,7 +707,7 @@ function QlessJob:retry(now, queue, worker, delay, group, message) assert(worker, 'Retry(): Arg "worker" missing') delay = assert(tonumber(delay or 0), 'Retry(): Arg "delay" not a number: ' .. tostring(delay)) - + local oldqueue, state, retries, oldworker, priority, failure = unpack( redis.call('hmget', QlessJob.ns .. self.jid, 'queue', 'state', 'retries', 'worker', 'priority', 'failure')) @@ -658,12 +726,15 @@ function QlessJob:retry(now, queue, worker, delay, group, message) Qless.queue(oldqueue).locks.remove(self.jid) + self:release_throttle(now) + self:acquire_throttle() + redis.call('zrem', 'ql:w:' .. worker .. ':jobs', self.jid) if remaining < 0 then local group = group or 'failed-retries-' .. queue self:history(now, 'failed', {['group'] = group}) - + redis.call('hmset', QlessJob.ns .. self.jid, 'state', 'failed', 'worker', '', 'expires', '') @@ -686,7 +757,7 @@ function QlessJob:retry(now, queue, worker, delay, group, message) ['worker'] = unpack(self:data('worker')) })) end - + redis.call('sadd', 'ql:failures', group) redis.call('lpush', 'ql:f:' .. group, self.jid) local bin = now - (now % 86400) @@ -804,9 +875,9 @@ function QlessJob:heartbeat(now, worker, data) redis.call('hmset', QlessJob.ns .. self.jid, 'expires', expires, 'worker', worker) end - + redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, self.jid) - + local queue = Qless.queue( redis.call('hget', QlessJob.ns .. self.jid, 'queue')) queue.locks.add(expires, self.jid) @@ -927,6 +998,21 @@ function QlessJob:history(now, what, item) cjson.encode({math.floor(now), what, item})) end end + +function QlessJob:release_throttle(now) + local tid = redis.call('hget', QlessJob.ns .. self.jid, 'throttle') + if tid then + Qless.throttle(tid):release(now, self.jid) + end +end + +function QlessJob:acquire_throttle() + local tid = unpack(redis.call('hmget', QlessJob.ns .. self.jid, 'throttle')) + if tid then + return Qless.throttle(tid):acquire(self.jid) + end + return true +end function Qless.queue(name) assert(name, 'Queue(): no queue name provided') local queue = {} @@ -997,6 +1083,20 @@ function Qless.queue(name) end } + queue.throttled = { + peek = function(now, offset, count) + return redis.call('zrange', queue:prefix('throttled'), offset, offset + count - 1) + end, add = function(now, jid) + redis.call('zadd', queue:prefix('throttled'), jid) + end, remove = function(...) 
+ if #arg > 0 then + return redis.call('zrem', queue:prefix('throttled'), unpack(arg)) + end + end, length = function() + return redis.call('zcard', queue:prefix('throttled')) + end + } + queue.scheduled = { peek = function(now, offset, count) return redis.call('zrange', @@ -1063,11 +1163,11 @@ function QlessQueue:stats(now, date) local key = 'ql:s:' .. name .. ':' .. bin .. ':' .. queue local count, mean, vk = unpack(redis.call('hmget', key, 'total', 'mean', 'vk')) - + count = tonumber(count) or 0 mean = tonumber(mean) or 0 vk = tonumber(vk) - + results.count = count or 0 results.mean = mean or 0 results.histogram = {} @@ -1172,17 +1272,17 @@ function QlessQueue:pop(now, worker, count) self:stat(now, 'wait', waiting) redis.call('hset', QlessJob.ns .. jid, 'time', string.format("%.20f", now)) - + redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, jid) - + job:update({ worker = worker, expires = expires, state = 'running' }) - + self.locks.add(expires, jid) - + local tracked = redis.call('zscore', 'ql:tracked', jid) ~= false if tracked then Qless.publish('popped', jid) @@ -1222,7 +1322,7 @@ function QlessQueue:stat(now, stat, val) redis.call('hincrby', key, 'h' .. math.floor(val / 3600), 1) else -- days redis.call('hincrby', key, 'd' .. math.floor(val / 86400), 1) - end + end redis.call('hmset', key, 'total', count, 'mean', mean, 'vk', vk) end @@ -1249,11 +1349,11 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) Qless.tag(now, 'remove', jid, unpack(cjson.decode(tags))) end - retries = assert(tonumber(options['retries'] or retries or 5) , + local retries = assert(tonumber(options['retries'] or retries or 5) , 'Put(): Arg "retries" not a number: ' .. tostring(options['retries'])) - tags = assert(cjson.decode(options['tags'] or tags or '[]' ), + local tags = assert(cjson.decode(options['tags'] or tags or '[]' ), 'Put(): Arg "tags" not JSON' .. tostring(options['tags'])) - priority = assert(tonumber(options['priority'] or priority or 0), + local priority = assert(tonumber(options['priority'] or priority or 0), 'Put(): Arg "priority" not a number' .. tostring(options['priority'])) local depends = assert(cjson.decode(options['depends'] or '[]') , 'Put(): Arg "depends" not JSON: ' .. tostring(options['depends'])) @@ -1264,7 +1364,7 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) local original = redis.call( 'smembers', QlessJob.ns .. jid .. '-dependencies') - for _, dep in pairs(original) do + for _, dep in pairs(original) do if new[dep] == nil then redis.call('srem', QlessJob.ns .. dep .. '-dependents' , jid) redis.call('srem', QlessJob.ns .. jid .. '-dependencies', dep) @@ -1320,7 +1420,7 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. self.name, 'failed' , -1) end - redis.call('hmset', QlessJob.ns .. jid, + data = { 'jid' , jid, 'klass' , klass, 'data' , raw_data, @@ -1332,7 +1432,15 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) 'queue' , self.name, 'retries' , retries, 'remaining', retries, - 'time' , string.format("%.20f", now)) + 'time' , string.format("%.20f", now) + } + + if options['throttle'] then + table.insert(data, 'throttle') + table.insert(data, options['throttle']) + end + + redis.call('hmset', QlessJob.ns .. jid, unpack(data)) for i, j in ipairs(depends) do local state = redis.call('hget', QlessJob.ns .. j, 'state') @@ -1355,8 +1463,10 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) 
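-- (Illustrative sketch, not part of the diff: at put time a job either waits
-- on dependencies, enters the work set if its throttle grants a lock, or is
-- parked on the queue's 'throttled' set. The function and its local names
-- below are hypothetical restatements of that branch.)
local function place_job(queue, now, priority, jid, job)
  if redis.call('scard', QlessJob.ns .. jid .. '-dependencies') > 0 then
    queue.depends.add(now, jid)
    redis.call('hset', QlessJob.ns .. jid, 'state', 'depends')
  elseif job:acquire_throttle() then
    -- Lock acquired: the job is immediately available for popping.
    queue.work.add(now, priority, jid)
  else
    -- Throttle at capacity: park the job until a lock is released.
    queue.throttled.add(now, jid)
  end
end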
if redis.call('scard', QlessJob.ns .. jid .. '-dependencies') > 0 then self.depends.add(now, jid) redis.call('hset', QlessJob.ns .. jid, 'state', 'depends') - else + elseif job:acquire_throttle() then self.work.add(now, priority, jid) + else + self.throttled.add(jid) end end @@ -1419,7 +1529,7 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...) if #arg % 2 == 1 then error('Odd number of additional args: ' .. tostring(arg)) end - + local options = {} for i = 3, #arg, 2 do options[arg[i]] = arg[i + 1] end options.tags = assert(cjson.decode(options.tags or '{}'), @@ -1441,7 +1551,7 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...) if old_queue then Qless.queue(old_queue).recurring.remove(jid) end - + redis.call('hmset', 'ql:r:' .. jid, 'jid' , jid, 'klass' , klass, @@ -1456,11 +1566,11 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...) 'retries' , options.retries, 'backlog' , options.backlog) self.recurring.add(now + offset, jid) - + if redis.call('zscore', 'ql:queues', self.name) == false then redis.call('zadd', 'ql:queues', now, self.name) end - + return jid else error('Recur(): schedule type "' .. tostring(spec) .. '" unknown') @@ -1491,16 +1601,16 @@ function QlessQueue:check_recurring(now, count) ) end end - + while (score <= now) and (moved < count) do local count = redis.call('hincrby', 'ql:r:' .. jid, 'count', 1) moved = moved + 1 - + for i, tag in ipairs(_tags) do redis.call('zadd', 'ql:t:' .. tag, now, jid .. '-' .. count) redis.call('zincrby', 'ql:tags', 1, tag) end - + local child_jid = jid .. '-' .. count redis.call('hmset', QlessJob.ns .. child_jid, 'jid' , jid .. '-' .. count, @@ -1516,9 +1626,9 @@ function QlessQueue:check_recurring(now, count) 'remaining', retries, 'time' , string.format("%.20f", score)) Qless.job(child_jid):history(score, 'put', {q = self.name}) - + self.work.add(score, priority, jid .. '-' .. count) - + score = score + interval self.recurring.add(score, jid) end @@ -1583,12 +1693,12 @@ function QlessQueue:invalidate_locks(now, count) local remaining = tonumber(redis.call( 'hincrby', QlessJob.ns .. jid, 'remaining', -1)) - + if remaining < 0 then self.work.remove(jid) self.locks.remove(jid) self.scheduled.remove(jid) - + local group = 'failed-retries-' .. Qless.job(jid):data()['queue'] local job = Qless.job(jid) job:history(now, 'failed', {group = group}) @@ -1603,10 +1713,10 @@ function QlessQueue:invalidate_locks(now, count) ['when'] = now, ['worker'] = unpack(job:data('worker')) })) - + redis.call('sadd', 'ql:failures', group) redis.call('lpush', 'ql:f:' .. group, jid) - + if redis.call('zscore', 'ql:tracked', jid) ~= false then Qless.publish('failed', jid) end @@ -1805,6 +1915,70 @@ function QlessWorker.counts(now, worker) return response end end +function QlessThrottle:data() + local throttle = redis.call('hmget', QlessThrottle.ns .. self.id, 'id', 'maximum') + if not throttle[1] then + return nil + end + + local data = { + id = throttle[1], + maximum = tonumber(throttle[2]) + } + return data +end + +function QlessThrottle:set(data) + redis.call('hmset', QlessThrottle.ns .. self.id, 'id', self.id, 'maximum', data.maximum) +end + +function QlessThrottle:unset() + redis.call('del', QlessThrottle.ns .. self.id) +end + +function QlessThrottle:acquire(jid) + if self:available() then + redis.call('set', 'printline', jid .. ' is acquiring the lock for ' .. self.id) + self.locks.add(1, jid) + return true + else + redis.call('set', 'printline', jid .. ' failed acquiring the lock for ' .. self.id .. 
' marked as pending') + self.pending.add(1, jid) + return false + end +end + +function QlessThrottle:release(now, jid) + redis.call('set', 'printline', jid .. ' is releasing lock on ' .. self.id) + self.locks.remove(jid) + redis.call('set', 'printline', 'retrieving next job from pending on ' .. self.id) + local next_jid = unpack(self:pending_pop(0, 0)) + if next_jid and self:acquire(next_jid) then + local job = Qless.job(next_jid):data() + local queue_obj = Qless.queue(job.queue) + queue_obj.throttled.remove(job.jid) + queue_obj.work.add(now, job.priority, job.jid) + end +end + +function QlessThrottle:lock_pop(min, max) + local lock = Qless.throttle(self.id).locks + local jid = lock.peek(min,max) + lock.pop(min,max) + return jid +end + +function QlessThrottle:pending_pop(min, max) + local pending = Qless.throttle(self.id).pending + local jids = pending.peek(min,max) + pending.pop(min,max) + return jids +end + +function QlessThrottle:available() + redis.call('set', 'printline', 'available ' .. self.maximum .. ' == 0 or ' .. self.locks.count() .. ' < self.maximum') + return self.maximum == 0 or self.locks.count() < self.maximum +end local QlessAPI = {} function QlessAPI.get(now, jid) @@ -1988,6 +2162,32 @@ QlessAPI['queue.forget'] = function(now, ...) QlessQueue.deregister(unpack(arg)) end +QlessAPI['throttle.set'] = function(now, tid, max) + local data = { + maximum = max + } + Qless.throttle(tid):set(data) +end + +QlessAPI['throttle.get'] = function(now, tid) + local data = Qless.throttle(tid):data() + if not data then + return nil + end + return cjson.encode(data) +end + +QlessAPI['throttle.delete'] = function(now, tid) + return Qless.throttle(tid):unset() +end + +QlessAPI['throttle.locks'] = function(now, tid) + return Qless.throttle(tid).locks.members() +end + +QlessAPI['throttle.pending'] = function(now, tid) + return Qless.throttle(tid).pending.members() +end if #KEYS > 0 then error('No Keys should be provided') end From 647c5b8eb93c3ebca80c8d04e8107399ee664f51 Mon Sep 17 00:00:00 2001 From: Matt Conway Date: Tue, 11 Mar 2014 13:32:18 -0400 Subject: [PATCH 03/83] pass throttle when enqueing a job --- lib/qless/queue.rb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/qless/queue.rb b/lib/qless/queue.rb index 3e4d8350..7ef0e333 100644 --- a/lib/qless/queue.rb +++ b/lib/qless/queue.rb @@ -102,7 +102,8 @@ def put(klass, data, opts = {}) 'priority', opts.fetch(:priority, 0), 'tags', JSON.generate(opts.fetch(:tags, [])), 'retries', opts.fetch(:retries, 5), - 'depends', JSON.generate(opts.fetch(:depends, [])) + 'depends', JSON.generate(opts.fetch(:depends, []), + 'throttle', opts.fetch(:throttle, @name)) ) end From a48aa5c6bdba29f705315370ca6d55292576e1cc Mon Sep 17 00:00:00 2001 From: Matt Conway Date: Tue, 11 Mar 2014 13:40:05 -0400 Subject: [PATCH 04/83] fix typo --- lib/qless/queue.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/qless/queue.rb b/lib/qless/queue.rb index 7ef0e333..1c74eb57 100644 --- a/lib/qless/queue.rb +++ b/lib/qless/queue.rb @@ -102,8 +102,8 @@ def put(klass, data, opts = {}) 'priority', opts.fetch(:priority, 0), 'tags', JSON.generate(opts.fetch(:tags, [])), 'retries', opts.fetch(:retries, 5), - 'depends', JSON.generate(opts.fetch(:depends, []), - 'throttle', opts.fetch(:throttle, @name)) + 'depends', JSON.generate(opts.fetch(:depends, [])), + 'throttle', opts.fetch(:throttle, @name) ) end From d6b5fe883e93ce64db7304e0cfdef3274f766110 Mon Sep 17 00:00:00 2001 From: Danny Guinther Date: Tue, 11 Mar 2014 17:07:26 
-0400 Subject: [PATCH 05/83] Update qless lua scripts to enable concurrency --- lib/qless/lua/qless-lib.lua | 141 ++++++++++++++++++++---------------- lib/qless/lua/qless.lua | 121 +++++++++++++++++-------------- 2 files changed, 145 insertions(+), 117 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua index bd97d8d2..d621038e 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: 4df412313097935d1d36bc0adc04ae09168cc53c +-- Current SHA: 27aebd45e228a2a2dc13d8cfc64a51a835a05342 -- This is a generated file ------------------------------------------------------------------------------- -- Forward declarations to make everything happy @@ -252,7 +252,6 @@ function Qless.track(now, command, jid) assert(jid, 'Track(): Arg "jid" missing') -- Verify that job exists assert(Qless.job(jid):exists(), 'Track(): Job does not exist') - redis.call('set', 'print_line_track_command', now .. command .. jid) if string.lower(command) == 'track' then Qless.publish('track', jid) return redis.call('zadd', 'ql:tracked', now, jid) @@ -677,6 +676,8 @@ function QlessJob:complete(now, worker, queue, data, ...) queue_obj.locks.remove(self.jid) queue_obj.scheduled.remove(self.jid) + -- Release queue throttle + Qless.throttle(QlessQueue.ns .. queue):release(now, self.jid) self:release_throttle(now) ---------------------------------------------------------- @@ -946,6 +947,8 @@ function QlessJob:fail(now, worker, group, message, data) ['worker'] = worker })) + -- Release queue throttle + Qless.throttle(QlessQueue.ns .. queue):release(now, self.jid) self:release_throttle(now) -- Add this group of failure to the list of failures @@ -1006,9 +1009,11 @@ function QlessJob:retry(now, queue, worker, delay, group, message) -- Remove it from the locks key of the old queue Qless.queue(oldqueue).locks.remove(self.jid) + -- Release the throttle for the queue + Qless.throttle(QlessQueue.ns .. queue):release(now, self.jid) + -- Release the throttle for the job self:release_throttle(now) - self:acquire_throttle() -- Remove this job from the worker that was previously working it redis.call('zrem', 'ql:w:' .. worker .. ':jobs', self.jid) @@ -1443,7 +1448,7 @@ function Qless.queue(name) peek = function(now, offset, count) return redis.call('zrange', queue:prefix('throttled'), offset, offset + count - 1) end, add = function(now, jid) - redis.call('zadd', queue:prefix('throttled'), jid) + redis.call('zadd', queue:prefix('throttled'), now, jid) end, remove = function(...) if #arg > 0 then return redis.call('zrem', queue:prefix('throttled'), unpack(arg)) @@ -1653,11 +1658,6 @@ function QlessQueue:pop(now, worker, count) count = assert(tonumber(count), 'Pop(): Arg "count" missing or not a number: ' .. tostring(count)) - -- We should find the heartbeat interval for this queue heartbeat - local expires = now + tonumber( - Qless.config.get(self.name .. '-heartbeat') or - Qless.config.get('heartbeat', 60)) - -- If this queue is paused, then return no jobs if self:paused() then return {} @@ -1666,74 +1666,87 @@ function QlessQueue:pop(now, worker, count) -- Make sure we this worker to the list of seen workers redis.call('zadd', 'ql:workers', now, worker) - -- Check our max concurrency, and limit the count - local max_concurrency = tonumber( - Qless.config.get(self.name .. 
'-max-concurrency', 0)) - - if max_concurrency > 0 then - -- Allow at most max_concurrency - #running - local allowed = math.max(0, max_concurrency - self.locks.running(now)) - count = math.min(allowed, count) - if count == 0 then - return {} - end - end - - local jids = self:invalidate_locks(now, count) + local dead_jids = self:invalidate_locks(now, count) or {} -- Now we've checked __all__ the locks for this queue the could -- have expired, and are no more than the number requested. -- If we still need jobs in order to meet demand, then we should -- look for all the recurring jobs that need jobs run - self:check_recurring(now, count - #jids) + self:check_recurring(now, count - #dead_jids) -- If we still need values in order to meet the demand, then we -- should check if any scheduled items, and if so, we should -- insert them to ensure correctness when pulling off the next -- unit of work. - self:check_scheduled(now, count - #jids) + self:check_scheduled(now, count - #dead_jids) -- With these in place, we can expand this list of jids based on the work -- queue itself and the priorities therein - table.extend(jids, self.work.peek(count - #jids)) + local jids = self.work.peek(count - #dead_jids) or {} - local state + local queue_throttle = Qless.throttle(QlessQueue.ns .. self.name) + + local popped = {} for index, jid in ipairs(jids) do local job = Qless.job(jid) - state = unpack(job:data('state')) - job:history(now, 'popped', {worker = worker}) + if queue_throttle:acquire(jid) and job:acquire_throttle() then + self:pop_job(now, worker, job) + table.insert(popped, jid) + else + job:history(now, 'throttled', {worker = worker}) + end + end - -- Update the wait time statistics - local time = tonumber( - redis.call('hget', QlessJob.ns .. jid, 'time') or now) - local waiting = now - time - self:stat(now, 'wait', waiting) - redis.call('hset', QlessJob.ns .. jid, - 'time', string.format("%.20f", now)) + -- If we are returning any jobs, then remove popped jobs from + -- work queue + self.work.remove(unpack(popped)) - -- Add this job to the list of jobs handled by this worker - redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, jid) + -- Process dead jids after removing newly popped jids from work queue + -- This changes the order of returned jids + for index, jid in ipairs(dead_jids) do + self:pop_job(now, worker, Qless.job(jid)) + table.insert(popped, jid) + end - -- Update the jobs data, and add its locks, and return the job - job:update({ - worker = worker, - expires = expires, - state = 'running' - }) + return popped +end - self.locks.add(expires, jid) +function QlessQueue:pop_job(now, worker, job) + local state + local jid = job.jid + state = unpack(job:data('state')) + job:history(now, 'popped', {worker = worker}) - local tracked = redis.call('zscore', 'ql:tracked', jid) ~= false - if tracked then - Qless.publish('popped', jid) - end - end + -- We should find the heartbeat interval for this queue heartbeat + local expires = now + tonumber( + Qless.config.get(self.name .. '-heartbeat') or + Qless.config.get('heartbeat', 60)) - -- If we are returning any jobs, then we should remove them from the work - -- queue - self.work.remove(unpack(jids)) + -- Update the wait time statistics + -- Just does job:data('time') do the same as this? + local time = tonumber( + redis.call('hget', QlessJob.ns .. jid, 'time') or now) + local waiting = now - time + self:stat(now, 'wait', waiting) + redis.call('hset', QlessJob.ns .. 
jid, + 'time', string.format("%.20f", now)) - return jids + -- Add this job to the list of jobs handled by this worker + redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, jid) + + -- Update the jobs data, and add its locks, and return the job + job:update({ + worker = worker, + expires = expires, + state = 'running' + }) + + self.locks.add(expires, jid) + + local tracked = redis.call('zscore', 'ql:tracked', jid) ~= false + if tracked then + Qless.publish('popped', jid) + end end -- Update the stats for this queue @@ -1960,10 +1973,8 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) if redis.call('scard', QlessJob.ns .. jid .. '-dependencies') > 0 then self.depends.add(now, jid) redis.call('hset', QlessJob.ns .. jid, 'state', 'depends') - elseif job:acquire_throttle() then - self.work.add(now, priority, jid) else - self.throttled.add(jid) + self.work.add(now, priority, jid) end end @@ -2278,8 +2289,14 @@ function QlessQueue:invalidate_locks(now, count) self.locks.remove(jid) self.scheduled.remove(jid) - local group = 'failed-retries-' .. Qless.job(jid):data()['queue'] local job = Qless.job(jid) + local job_data = Qless.job(jid):data() + local queue = job_data['queue'] + local group = 'failed-retries-' .. queue + + job:release_throttle(now) + Qless.throttle(QlessQueue.ns .. queue):release(now, jid) + job:history(now, 'failed', {group = group}) redis.call('hmset', QlessJob.ns .. jid, 'state', 'failed', 'worker', '', @@ -2624,11 +2641,11 @@ end -- the job will be moved from the throttled -- queue into the work queue function QlessThrottle:release(now, jid) - redis.call('set', 'printline', jid .. ' is releasing lock on ' .. self.id) + --redis.call('set', 'printline', jid .. ' is releasing lock on ' .. self.id) self.locks.remove(jid) - redis.call('set', 'printline', 'retrieving next job from pending on ' .. self.id) + --redis.call('set', 'printline', 'retrieving next job from pending on ' .. self.id) local next_jid = unpack(self:pending_pop(0, 0)) - if next_jid and self:acquire(next_jid) then + if next_jid then local job = Qless.job(next_jid):data() local queue_obj = Qless.queue(job.queue) queue_obj.throttled.remove(job.jid) @@ -2652,6 +2669,6 @@ end -- Returns true if the throttle has locks available, false otherwise. function QlessThrottle:available() - redis.call('set', 'printline', 'available ' .. self.maximum .. ' == 0 or ' .. self.locks.count() .. ' < self.maximum') + redis.call('set', 'printline', 'available ' .. self.maximum .. ' == 0 or ' .. self.locks.count() .. ' < ' .. self.maximum) return self.maximum == 0 or self.locks.count() < self.maximum end diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua index 9fc80362..84bb6491 100644 --- a/lib/qless/lua/qless.lua +++ b/lib/qless/lua/qless.lua @@ -1,4 +1,4 @@ --- Current SHA: 4df412313097935d1d36bc0adc04ae09168cc53c +-- Current SHA: 27aebd45e228a2a2dc13d8cfc64a51a835a05342 -- This is a generated file local Qless = { ns = 'ql:' @@ -165,7 +165,6 @@ function Qless.track(now, command, jid) if command ~= nil then assert(jid, 'Track(): Arg "jid" missing') assert(Qless.job(jid):exists(), 'Track(): Job does not exist') - redis.call('set', 'print_line_track_command', now .. command .. jid) if string.lower(command) == 'track' then Qless.publish('track', jid) return redis.call('zadd', 'ql:tracked', now, jid) @@ -489,6 +488,7 @@ function QlessJob:complete(now, worker, queue, data, ...) queue_obj.locks.remove(self.jid) queue_obj.scheduled.remove(self.jid) + Qless.throttle(QlessQueue.ns .. 
queue):release(now, self.jid) self:release_throttle(now) local time = tonumber( @@ -693,6 +693,7 @@ function QlessJob:fail(now, worker, group, message, data) ['worker'] = worker })) + Qless.throttle(QlessQueue.ns .. queue):release(now, self.jid) self:release_throttle(now) redis.call('sadd', 'ql:failures', group) @@ -726,8 +727,9 @@ function QlessJob:retry(now, queue, worker, delay, group, message) Qless.queue(oldqueue).locks.remove(self.jid) + Qless.throttle(QlessQueue.ns .. queue):release(now, self.jid) + self:release_throttle(now) - self:acquire_throttle() redis.call('zrem', 'ql:w:' .. worker .. ':jobs', self.jid) @@ -1087,7 +1089,7 @@ function Qless.queue(name) peek = function(now, offset, count) return redis.call('zrange', queue:prefix('throttled'), offset, offset + count - 1) end, add = function(now, jid) - redis.call('zadd', queue:prefix('throttled'), jid) + redis.call('zadd', queue:prefix('throttled'), now, jid) end, remove = function(...) if #arg > 0 then return redis.call('zrem', queue:prefix('throttled'), unpack(arg)) @@ -1231,67 +1233,74 @@ function QlessQueue:pop(now, worker, count) count = assert(tonumber(count), 'Pop(): Arg "count" missing or not a number: ' .. tostring(count)) - local expires = now + tonumber( - Qless.config.get(self.name .. '-heartbeat') or - Qless.config.get('heartbeat', 60)) - if self:paused() then return {} end redis.call('zadd', 'ql:workers', now, worker) - local max_concurrency = tonumber( - Qless.config.get(self.name .. '-max-concurrency', 0)) + local dead_jids = self:invalidate_locks(now, count) or {} - if max_concurrency > 0 then - local allowed = math.max(0, max_concurrency - self.locks.running(now)) - count = math.min(allowed, count) - if count == 0 then - return {} - end - end + self:check_recurring(now, count - #dead_jids) - local jids = self:invalidate_locks(now, count) + self:check_scheduled(now, count - #dead_jids) - self:check_recurring(now, count - #jids) + local jids = self.work.peek(count - #dead_jids) or {} - self:check_scheduled(now, count - #jids) + local queue_throttle = Qless.throttle(QlessQueue.ns .. self.name) - table.extend(jids, self.work.peek(count - #jids)) - - local state + local popped = {} for index, jid in ipairs(jids) do local job = Qless.job(jid) - state = unpack(job:data('state')) - job:history(now, 'popped', {worker = worker}) - - local time = tonumber( - redis.call('hget', QlessJob.ns .. jid, 'time') or now) - local waiting = now - time - self:stat(now, 'wait', waiting) - redis.call('hset', QlessJob.ns .. jid, - 'time', string.format("%.20f", now)) - - redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, jid) - - job:update({ - worker = worker, - expires = expires, - state = 'running' - }) + if queue_throttle:acquire(jid) and job:acquire_throttle() then + self:pop_job(now, worker, job) + table.insert(popped, jid) + else + job:history(now, 'throttled', {worker = worker}) + end + end - self.locks.add(expires, jid) + self.work.remove(unpack(popped)) - local tracked = redis.call('zscore', 'ql:tracked', jid) ~= false - if tracked then - Qless.publish('popped', jid) - end + for index, jid in ipairs(dead_jids) do + self:pop_job(now, worker, Qless.job(jid)) + table.insert(popped, jid) end - self.work.remove(unpack(jids)) + return popped +end - return jids +function QlessQueue:pop_job(now, worker, job) + local state + local jid = job.jid + state = unpack(job:data('state')) + job:history(now, 'popped', {worker = worker}) + + local expires = now + tonumber( + Qless.config.get(self.name .. 
'-heartbeat') or + Qless.config.get('heartbeat', 60)) + + local time = tonumber( + redis.call('hget', QlessJob.ns .. jid, 'time') or now) + local waiting = now - time + self:stat(now, 'wait', waiting) + redis.call('hset', QlessJob.ns .. jid, + 'time', string.format("%.20f", now)) + + redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, jid) + + job:update({ + worker = worker, + expires = expires, + state = 'running' + }) + + self.locks.add(expires, jid) + + local tracked = redis.call('zscore', 'ql:tracked', jid) ~= false + if tracked then + Qless.publish('popped', jid) + end end function QlessQueue:stat(now, stat, val) @@ -1463,10 +1472,8 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) if redis.call('scard', QlessJob.ns .. jid .. '-dependencies') > 0 then self.depends.add(now, jid) redis.call('hset', QlessJob.ns .. jid, 'state', 'depends') - elseif job:acquire_throttle() then - self.work.add(now, priority, jid) else - self.throttled.add(jid) + self.work.add(now, priority, jid) end end @@ -1699,8 +1706,14 @@ function QlessQueue:invalidate_locks(now, count) self.locks.remove(jid) self.scheduled.remove(jid) - local group = 'failed-retries-' .. Qless.job(jid):data()['queue'] local job = Qless.job(jid) + local job_data = Qless.job(jid):data() + local queue = job_data['queue'] + local group = 'failed-retries-' .. queue + + job:release_throttle(now) + Qless.throttle(QlessQueue.ns .. queue):release(now, jid) + job:history(now, 'failed', {group = group}) redis.call('hmset', QlessJob.ns .. jid, 'state', 'failed', 'worker', '', @@ -1949,11 +1962,9 @@ function QlessThrottle:acquire(jid) end function QlessThrottle:release(now, jid) - redis.call('set', 'printline', jid .. ' is releasing lock on ' .. self.id) self.locks.remove(jid) - redis.call('set', 'printline', 'retrieving next job from pending on ' .. self.id) local next_jid = unpack(self:pending_pop(0, 0)) - if next_jid and self:acquire(next_jid) then + if next_jid then local job = Qless.job(next_jid):data() local queue_obj = Qless.queue(job.queue) queue_obj.throttled.remove(job.jid) @@ -1976,7 +1987,7 @@ function QlessThrottle:pending_pop(min, max) end function QlessThrottle:available() - redis.call('set', 'printline', 'available ' .. self.maximum .. ' == 0 or ' .. self.locks.count() .. ' < self.maximum') + redis.call('set', 'printline', 'available ' .. self.maximum .. ' == 0 or ' .. self.locks.count() .. ' < ' .. self.maximum) return self.maximum == 0 or self.locks.count() < self.maximum end local QlessAPI = {} From 70b1712299ee87ab1e5e4e0599a5695636574238 Mon Sep 17 00:00:00 2001 From: Danny Guinther Date: Tue, 11 Mar 2014 17:22:43 -0400 Subject: [PATCH 06/83] Only put throttle if a throttle exists --- lib/qless/queue.rb | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/lib/qless/queue.rb b/lib/qless/queue.rb index 1c74eb57..fb2a27e6 100644 --- a/lib/qless/queue.rb +++ b/lib/qless/queue.rb @@ -94,17 +94,26 @@ def unpause # => delay (int) def put(klass, data, opts = {}) opts = job_options(klass, data, opts) - @client.call('put', worker_name, @name, - (opts[:jid] || Qless.generate_jid), - klass.is_a?(String) ? 
klass : klass.name, - JSON.generate(data), - opts.fetch(:delay, 0), - 'priority', opts.fetch(:priority, 0), - 'tags', JSON.generate(opts.fetch(:tags, [])), - 'retries', opts.fetch(:retries, 5), - 'depends', JSON.generate(opts.fetch(:depends, [])), - 'throttle', opts.fetch(:throttle, @name) - ) + args = [ + worker_name, @name, + (opts[:jid] || Qless.generate_jid), + klass.is_a?(String) ? klass : klass.name, + JSON.generate(data), + opts.fetch(:delay, 0), + 'priority', + opts.fetch(:priority, 0), + 'tags', + JSON.generate(opts.fetch(:tags, [])), + 'retries', + opts.fetch(:retries, 5), + 'depends', + JSON.generate(opts.fetch(:depends, [])), + ] + + throttle = opts.fetch(:throttle, @name) + args.concat(['throttle', throttle]) if throttle + + @client.call('put', *args) end # Make a recurring job in this queue From f762a2ea8c93305ed775fb24a0e06f4cf5ca7fa7 Mon Sep 17 00:00:00 2001 From: Danny Guinther Date: Wed, 12 Mar 2014 07:59:28 -0400 Subject: [PATCH 07/83] Update qless lua scripts for multiple throttles --- lib/qless/lua/qless-lib.lua | 130 +++++++++++++++++++++--------------- lib/qless/lua/qless.lua | 118 +++++++++++++++++++------------- 2 files changed, 150 insertions(+), 98 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua index d621038e..99efb7b1 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: 27aebd45e228a2a2dc13d8cfc64a51a835a05342 +-- Current SHA: 5d86598c8bf169b3e0ae6acec4214b845e2604d7 -- This is a generated file ------------------------------------------------------------------------------- -- Forward declarations to make everything happy @@ -384,7 +384,7 @@ end -- Cancel a job from taking place. It will be deleted from the system, and any -- attempts to renew a heartbeat will fail, and any attempts to complete it -- will fail. If you try to get the data on the object, you will get nothing. -function Qless.cancel(...) +function Qless.cancel(now, ...) -- Dependents is a mapping of a job to its dependent jids local dependents = {} for _, jid in ipairs(arg) do @@ -437,7 +437,7 @@ function Qless.cancel(...) queue.depends.remove(jid) end - Qless.job(namespaced_jid):release_throttle() + Qless.job(namespaced_jid):release_throttles(now) -- We should probably go through all our dependencies and remove -- ourselves from the list of dependents @@ -556,7 +556,7 @@ function QlessJob:data(...) local job = redis.call( 'hmget', QlessJob.ns .. self.jid, 'jid', 'klass', 'state', 'queue', 'worker', 'priority', 'expires', 'retries', 'remaining', 'data', - 'tags', 'failure', 'throttle') + 'tags', 'failure', 'throttles') -- Return nil if we haven't found it if not job[1] then @@ -579,7 +579,7 @@ function QlessJob:data(...) tags = cjson.decode(job[11]), history = self:history(), failure = cjson.decode(job[12] or '{}'), - throttle = job[13] or nil, + throttles = cjson.decode(job[13] or '[]'), dependents = redis.call( 'smembers', QlessJob.ns .. self.jid .. '-dependents'), dependencies = redis.call( @@ -676,9 +676,7 @@ function QlessJob:complete(now, worker, queue, data, ...) queue_obj.locks.remove(self.jid) queue_obj.scheduled.remove(self.jid) - -- Release queue throttle - Qless.throttle(QlessQueue.ns .. 
queue):release(now, self.jid) - self:release_throttle(now) + self:release_throttles(now) ---------------------------------------------------------- -- This is the massive stats update that we have to do @@ -947,9 +945,7 @@ function QlessJob:fail(now, worker, group, message, data) ['worker'] = worker })) - -- Release queue throttle - Qless.throttle(QlessQueue.ns .. queue):release(now, self.jid) - self:release_throttle(now) + self:release_throttles(now) -- Add this group of failure to the list of failures redis.call('sadd', 'ql:failures', group) @@ -1009,11 +1005,8 @@ function QlessJob:retry(now, queue, worker, delay, group, message) -- Remove it from the locks key of the old queue Qless.queue(oldqueue).locks.remove(self.jid) - -- Release the throttle for the queue - Qless.throttle(QlessQueue.ns .. queue):release(now, self.jid) - -- Release the throttle for the job - self:release_throttle(now) + self:release_throttles(now) -- Remove this job from the worker that was previously working it redis.call('zrem', 'ql:w:' .. worker .. ':jobs', self.jid) @@ -1350,19 +1343,34 @@ function QlessJob:history(now, what, item) end end -function QlessJob:release_throttle(now) - local tid = redis.call('hget', QlessJob.ns .. self.jid, 'throttle') - if tid then +function QlessJob:release_throttles(now) + local throttles = redis.call('hget', QlessJob.ns .. self.jid, 'throttles') + throttles = cjson.decode(throttles or '{}') + + for _, tid in ipairs(throttles) do + redis.call('set', 'printline', 'releasing throttle : ' .. tid) Qless.throttle(tid):release(now, self.jid) end end -function QlessJob:acquire_throttle() - local tid = unpack(redis.call('hmget', QlessJob.ns .. self.jid, 'throttle')) - if tid then - return Qless.throttle(tid):acquire(self.jid) +function QlessJob:acquire_throttles(now) + local throttles = cjson.decode(redis.call('hget', QlessJob.ns .. self.jid, 'throttles')) + + local acquired_all = true + local acquired_throttles = {} + for _, tid in ipairs(throttles) do + acquired_all = acquired_all and Qless.throttle(tid):acquire(self.jid) + table.insert(acquired_throttles, tid) + end + + if not acquired_all then + redis.call('set', 'printline', 'rolling back acquired locks') + for _, tid in ipairs(acquired_throttles) do + Qless.throttle(tid):rollback_acquire(self.jid) + end end - return true + + return acquired_all end ------------------------------------------------------------------------------- -- Queue class @@ -1689,7 +1697,7 @@ function QlessQueue:pop(now, worker, count) local popped = {} for index, jid in ipairs(jids) do local job = Qless.job(jid) - if queue_throttle:acquire(jid) and job:acquire_throttle() then + if job:acquire_throttles(now) then self:pop_job(now, worker, job) table.insert(popped, jid) else @@ -1834,8 +1842,10 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) 'Put(): Arg "priority" not a number' .. tostring(options['priority'])) local depends = assert(cjson.decode(options['depends'] or '[]') , 'Put(): Arg "depends" not JSON: ' .. tostring(options['depends'])) - -- local throttle = options['throttle'] + local throttles = assert(cjson.decode(options['throttles'] or '[]'), + 'Put(): Arg "throttles" not JSON array: ' .. tostring(options['throttles'])) + redis.call('set', 'printline', 'throttles : ' .. 
tostring(options['throttles'])) -- If the job has old dependencies, determine which dependencies are -- in the new dependencies but not in the old ones, and which are in the -- old ones but not in the new @@ -1921,6 +1931,9 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. self.name, 'failed' , -1) end + -- insert default queue throttle + table.insert(throttles, QlessQueue.ns .. self.name) + data = { 'jid' , jid, 'klass' , klass, @@ -1933,15 +1946,10 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) 'queue' , self.name, 'retries' , retries, 'remaining', retries, - 'time' , string.format("%.20f", now) + 'time' , string.format("%.20f", now), + 'throttles', cjson.encode(throttles) } - -- Insert the throttle resource into the array if it exists. - if options['throttle'] then - table.insert(data, 'throttle') - table.insert(data, options['throttle']) - end - -- First, let's save its data redis.call('hmset', QlessJob.ns .. jid, unpack(data)) @@ -2065,6 +2073,8 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...) options.backlog = assert(tonumber(options.backlog or 0), 'Recur(): Arg "backlog" not a number: ' .. tostring( options.backlog)) + options.throttles = assert(cjson.decode(options['throttles'] or '{}'), + 'Recur(): Arg "throttles" not JSON array: ' .. tostring(options['throttles'])) local count, old_queue = unpack(redis.call('hmget', 'ql:r:' .. jid, 'count', 'queue')) count = count or 0 @@ -2077,19 +2087,20 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...) -- Do some insertions redis.call('hmset', 'ql:r:' .. jid, - 'jid' , jid, - 'klass' , klass, - 'data' , raw_data, - 'priority', options.priority, - 'tags' , cjson.encode(options.tags or {}), - 'state' , 'recur', - 'queue' , self.name, - 'type' , 'interval', + 'jid' , jid, + 'klass' , klass, + 'data' , raw_data, + 'priority' , options.priority, + 'tags' , cjson.encode(options.tags or {}), + 'state' , 'recur', + 'queue' , self.name, + 'type' , 'interval', -- How many jobs we've spawned from this - 'count' , count, - 'interval', interval, - 'retries' , options.retries, - 'backlog' , options.backlog) + 'count' , count, + 'interval' , interval, + 'retries' , options.retries, + 'backlog' , options.backlog, + 'throttles', cjson.encode(options.throttles or {})) -- Now, we should schedule the next run of the job self.recurring.add(now + offset, jid) @@ -2125,9 +2136,12 @@ function QlessQueue:check_recurring(now, count) -- get the last time each of them was run, and then increment -- it by its interval. While this time is less than now, -- we need to keep putting jobs on the queue - local klass, data, priority, tags, retries, interval, backlog = unpack( + local r = redis.call('hmget', 'ql:r:' .. jid, 'klass', 'data', 'priority', + 'tags', 'retries', 'interval', 'backlog', 'throttles') + redis.call('set', 'printline', cjson.encode(r)) + local klass, data, priority, tags, retries, interval, backlog, throttles = unpack( redis.call('hmget', 'ql:r:' .. 
jid, 'klass', 'data', 'priority', - 'tags', 'retries', 'interval', 'backlog')) + 'tags', 'retries', 'interval', 'backlog', 'throttles')) local _tags = cjson.decode(tags) local score = math.floor(tonumber(self.recurring.score(jid))) interval = tonumber(interval) @@ -2173,7 +2187,8 @@ function QlessQueue:check_recurring(now, count) 'queue' , self.name, 'retries' , retries, 'remaining', retries, - 'time' , string.format("%.20f", score)) + 'time' , string.format("%.20f", score), + 'throttles', throttles) Qless.job(child_jid):history(score, 'put', {q = self.name}) -- Now, if a delay was provided, and if it's in the future, @@ -2294,8 +2309,7 @@ function QlessQueue:invalidate_locks(now, count) local queue = job_data['queue'] local group = 'failed-retries-' .. queue - job:release_throttle(now) - Qless.throttle(QlessQueue.ns .. queue):release(now, jid) + job:release_throttles(now) job:history(now, 'failed', {group = group}) redis.call('hmset', QlessJob.ns .. jid, 'state', 'failed', @@ -2624,7 +2638,7 @@ end -- Returns true of the job acquired the resource. function QlessThrottle:acquire(jid) if self:available() then - redis.call('set', 'printline', jid .. ' is acquiring the lock for ' .. self.id) + redis.call('set', 'printline', jid .. ' acquired the lock for ' .. self.id) self.locks.add(1, jid) return true else @@ -2634,6 +2648,17 @@ function QlessThrottle:acquire(jid) end end +-- Rolls back an attempted lock acquisition. +-- Since jobs can acquire multiple locks and the acquire +-- behavior is to either add them to the lock or pend them +-- this method handles the rolling back an acquired lock +-- on a job that failed to acquire all of its locks. +-- without placing another pending job into the queue. +function QlessThrottle:rollback_acquire(jid) + self.locks.remove(jid) + self.pending.add(1, jid) +end + -- Release a throttled resource. -- This will take a currently pending job -- and attempt to acquire a lock. @@ -2641,9 +2666,10 @@ end -- the job will be moved from the throttled -- queue into the work queue function QlessThrottle:release(now, jid) - --redis.call('set', 'printline', jid .. ' is releasing lock on ' .. self.id) + redis.call('set', 'printline', jid .. ' is releasing lock on ' .. self.id) self.locks.remove(jid) - --redis.call('set', 'printline', 'retrieving next job from pending on ' .. self.id) + + redis.call('set', 'printline', 'retrieving next job from pending on ' .. self.id) local next_jid = unpack(self:pending_pop(0, 0)) if next_jid then local job = Qless.job(next_jid):data() diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua index 84bb6491..ad2ffa51 100644 --- a/lib/qless/lua/qless.lua +++ b/lib/qless/lua/qless.lua @@ -1,4 +1,4 @@ --- Current SHA: 27aebd45e228a2a2dc13d8cfc64a51a835a05342 +-- Current SHA: 5d86598c8bf169b3e0ae6acec4214b845e2604d7 -- This is a generated file local Qless = { ns = 'ql:' @@ -263,7 +263,7 @@ function Qless.tag(now, command, ...) end end -function Qless.cancel(...) +function Qless.cancel(now, ...) local dependents = {} for _, jid in ipairs(arg) do dependents[jid] = redis.call( @@ -306,7 +306,7 @@ function Qless.cancel(...) queue.depends.remove(jid) end - Qless.job(namespaced_jid):release_throttle() + Qless.job(namespaced_jid):release_throttles(now) for i, j in ipairs(redis.call( 'smembers', QlessJob.ns .. jid .. '-dependencies')) do @@ -397,7 +397,7 @@ function QlessJob:data(...) local job = redis.call( 'hmget', QlessJob.ns .. 
self.jid, 'jid', 'klass', 'state', 'queue', 'worker', 'priority', 'expires', 'retries', 'remaining', 'data', - 'tags', 'failure', 'throttle') + 'tags', 'failure', 'throttles') if not job[1] then return nil @@ -419,7 +419,7 @@ function QlessJob:data(...) tags = cjson.decode(job[11]), history = self:history(), failure = cjson.decode(job[12] or '{}'), - throttle = job[13] or nil, + throttles = cjson.decode(job[13] or '[]'), dependents = redis.call( 'smembers', QlessJob.ns .. self.jid .. '-dependents'), dependencies = redis.call( @@ -488,8 +488,7 @@ function QlessJob:complete(now, worker, queue, data, ...) queue_obj.locks.remove(self.jid) queue_obj.scheduled.remove(self.jid) - Qless.throttle(QlessQueue.ns .. queue):release(now, self.jid) - self:release_throttle(now) + self:release_throttles(now) local time = tonumber( redis.call('hget', QlessJob.ns .. self.jid, 'time') or now) @@ -693,8 +692,7 @@ function QlessJob:fail(now, worker, group, message, data) ['worker'] = worker })) - Qless.throttle(QlessQueue.ns .. queue):release(now, self.jid) - self:release_throttle(now) + self:release_throttles(now) redis.call('sadd', 'ql:failures', group) redis.call('lpush', 'ql:f:' .. group, self.jid) @@ -727,9 +725,7 @@ function QlessJob:retry(now, queue, worker, delay, group, message) Qless.queue(oldqueue).locks.remove(self.jid) - Qless.throttle(QlessQueue.ns .. queue):release(now, self.jid) - - self:release_throttle(now) + self:release_throttles(now) redis.call('zrem', 'ql:w:' .. worker .. ':jobs', self.jid) @@ -1001,19 +997,34 @@ function QlessJob:history(now, what, item) end end -function QlessJob:release_throttle(now) - local tid = redis.call('hget', QlessJob.ns .. self.jid, 'throttle') - if tid then +function QlessJob:release_throttles(now) + local throttles = redis.call('hget', QlessJob.ns .. self.jid, 'throttles') + throttles = cjson.decode(throttles or '{}') + + for _, tid in ipairs(throttles) do + redis.call('set', 'printline', 'releasing throttle : ' .. tid) Qless.throttle(tid):release(now, self.jid) end end -function QlessJob:acquire_throttle() - local tid = unpack(redis.call('hmget', QlessJob.ns .. self.jid, 'throttle')) - if tid then - return Qless.throttle(tid):acquire(self.jid) +function QlessJob:acquire_throttles(now) + local throttles = cjson.decode(redis.call('hget', QlessJob.ns .. self.jid, 'throttles')) + + local acquired_all = true + local acquired_throttles = {} + for _, tid in ipairs(throttles) do + acquired_all = acquired_all and Qless.throttle(tid):acquire(self.jid) + table.insert(acquired_throttles, tid) + end + + if not acquired_all then + redis.call('set', 'printline', 'rolling back acquired locks') + for _, tid in ipairs(acquired_throttles) do + Qless.throttle(tid):rollback_acquire(self.jid) + end end - return true + + return acquired_all end function Qless.queue(name) assert(name, 'Queue(): no queue name provided') @@ -1252,7 +1263,7 @@ function QlessQueue:pop(now, worker, count) local popped = {} for index, jid in ipairs(jids) do local job = Qless.job(jid) - if queue_throttle:acquire(jid) and job:acquire_throttle() then + if job:acquire_throttles(now) then self:pop_job(now, worker, job) table.insert(popped, jid) else @@ -1366,7 +1377,10 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) 'Put(): Arg "priority" not a number' .. tostring(options['priority'])) local depends = assert(cjson.decode(options['depends'] or '[]') , 'Put(): Arg "depends" not JSON: ' .. 
tostring(options['depends'])) + local throttles = assert(cjson.decode(options['throttles'] or '[]'), + 'Put(): Arg "throttles" not JSON array: ' .. tostring(options['throttles'])) + redis.call('set', 'printline', 'throttles : ' .. tostring(options['throttles'])) if #depends > 0 then local new = {} for _, d in ipairs(depends) do new[d] = 1 end @@ -1429,6 +1443,8 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. self.name, 'failed' , -1) end + table.insert(throttles, QlessQueue.ns .. self.name) + data = { 'jid' , jid, 'klass' , klass, @@ -1441,14 +1457,10 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) 'queue' , self.name, 'retries' , retries, 'remaining', retries, - 'time' , string.format("%.20f", now) + 'time' , string.format("%.20f", now), + 'throttles', cjson.encode(throttles) } - if options['throttle'] then - table.insert(data, 'throttle') - table.insert(data, options['throttle']) - end - redis.call('hmset', QlessJob.ns .. jid, unpack(data)) for i, j in ipairs(depends) do @@ -1551,6 +1563,8 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...) options.backlog = assert(tonumber(options.backlog or 0), 'Recur(): Arg "backlog" not a number: ' .. tostring( options.backlog)) + options.throttles = assert(cjson.decode(options['throttles'] or '{}'), + 'Recur(): Arg "throttles" not JSON array: ' .. tostring(options['throttles'])) local count, old_queue = unpack(redis.call('hmget', 'ql:r:' .. jid, 'count', 'queue')) count = count or 0 @@ -1560,18 +1574,19 @@ function QlessQueue:recur(now, jid, klass, raw_data, spec, ...) end redis.call('hmset', 'ql:r:' .. jid, - 'jid' , jid, - 'klass' , klass, - 'data' , raw_data, - 'priority', options.priority, - 'tags' , cjson.encode(options.tags or {}), - 'state' , 'recur', - 'queue' , self.name, - 'type' , 'interval', - 'count' , count, - 'interval', interval, - 'retries' , options.retries, - 'backlog' , options.backlog) + 'jid' , jid, + 'klass' , klass, + 'data' , raw_data, + 'priority' , options.priority, + 'tags' , cjson.encode(options.tags or {}), + 'state' , 'recur', + 'queue' , self.name, + 'type' , 'interval', + 'count' , count, + 'interval' , interval, + 'retries' , options.retries, + 'backlog' , options.backlog, + 'throttles', cjson.encode(options.throttles or {})) self.recurring.add(now + offset, jid) if redis.call('zscore', 'ql:queues', self.name) == false then @@ -1592,9 +1607,12 @@ function QlessQueue:check_recurring(now, count) local moved = 0 local r = self.recurring.peek(now, 0, count) for index, jid in ipairs(r) do - local klass, data, priority, tags, retries, interval, backlog = unpack( + local r = redis.call('hmget', 'ql:r:' .. jid, 'klass', 'data', 'priority', + 'tags', 'retries', 'interval', 'backlog', 'throttles') + redis.call('set', 'printline', cjson.encode(r)) + local klass, data, priority, tags, retries, interval, backlog, throttles = unpack( redis.call('hmget', 'ql:r:' .. 
jid, 'klass', 'data', 'priority', - 'tags', 'retries', 'interval', 'backlog')) + 'tags', 'retries', 'interval', 'backlog', 'throttles')) local _tags = cjson.decode(tags) local score = math.floor(tonumber(self.recurring.score(jid))) interval = tonumber(interval) @@ -1631,7 +1649,8 @@ function QlessQueue:check_recurring(now, count) 'queue' , self.name, 'retries' , retries, 'remaining', retries, - 'time' , string.format("%.20f", score)) + 'time' , string.format("%.20f", score), + 'throttles', throttles) Qless.job(child_jid):history(score, 'put', {q = self.name}) self.work.add(score, priority, jid .. '-' .. count) @@ -1711,8 +1730,7 @@ function QlessQueue:invalidate_locks(now, count) local queue = job_data['queue'] local group = 'failed-retries-' .. queue - job:release_throttle(now) - Qless.throttle(QlessQueue.ns .. queue):release(now, jid) + job:release_throttles(now) job:history(now, 'failed', {group = group}) redis.call('hmset', QlessJob.ns .. jid, 'state', 'failed', @@ -1951,7 +1969,7 @@ end function QlessThrottle:acquire(jid) if self:available() then - redis.call('set', 'printline', jid .. ' is acquiring the lock for ' .. self.id) + redis.call('set', 'printline', jid .. ' acquired the lock for ' .. self.id) self.locks.add(1, jid) return true else @@ -1961,8 +1979,16 @@ function QlessThrottle:acquire(jid) end end +function QlessThrottle:rollback_acquire(jid) + self.locks.remove(jid) + self.pending.add(1, jid) +end + function QlessThrottle:release(now, jid) + redis.call('set', 'printline', jid .. ' is releasing lock on ' .. self.id) self.locks.remove(jid) + + redis.call('set', 'printline', 'retrieving next job from pending on ' .. self.id) local next_jid = unpack(self:pending_pop(0, 0)) if next_jid then local job = Qless.job(next_jid):data() @@ -2116,7 +2142,7 @@ QlessAPI.unpause = function(now, ...) end QlessAPI.cancel = function(now, ...) - return Qless.cancel(unpack(arg)) + return Qless.cancel(now, unpack(arg)) end QlessAPI.timeout = function(now, ...) 
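For context before the client-side changes that follow, here is a minimal usage sketch of the multi-throttle API these patches build up. It assumes the `throttles:` option to `Queue#put` wired up in the next patch, the throttle accessors added earlier in the series, and the implicit per-queue throttle inserted by the Lua `put` above; the queue name, throttle name, and job class are illustrative only and not part of the patches.

    require 'qless'

    client = Qless::Client.new
    queue  = client.queues['emails']

    # Cap concurrency for every job holding the illustrative 'smtp' throttle.
    client.throttles['smtp'].maximum = 5

    # Enqueue a job; in addition to the queue's own implicit throttle
    # ('ql:q:emails'), a worker must acquire the 'smtp' lock before this
    # job can be popped. SendEmailJob is a hypothetical job class.
    queue.put(SendEmailJob, { to: 'user@example.com' }, throttles: ['smtp'])

Because a job may now reference several throttles, the Lua side treats acquisition as all-or-nothing: if any lock is unavailable, the locks already taken for that job are rolled back and the job is left for a later pop rather than handed to the worker.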
From 3e41d42091c875ddbf06f029a2c6a6ea8397cd84 Mon Sep 17 00:00:00 2001 From: Danny Guinther Date: Wed, 12 Mar 2014 08:12:26 -0400 Subject: [PATCH 08/83] Add throttles accessor to job --- lib/qless/job.rb | 7 ++++--- lib/qless/queue.rb | 12 +++++------- spec/integration/job_spec.rb | 3 ++- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/lib/qless/job.rb b/lib/qless/job.rb index 8dee87be..1123c603 100644 --- a/lib/qless/job.rb +++ b/lib/qless/job.rb @@ -33,7 +33,7 @@ class Job < BaseJob attr_reader :klass_name, :tracked, :dependencies, :dependents attr_reader :original_retries, :retries_left, :raw_queue_history attr_reader :state_changed - attr_accessor :data, :priority, :tags + attr_accessor :data, :priority, :tags, :throttles alias_method(:state_changed?, :state_changed) MiddlewareMisconfiguredError = Class.new(StandardError) @@ -87,7 +87,8 @@ def self.build(client, klass, attributes = {}) 'failure' => {}, 'history' => [], 'dependencies' => [], - 'dependents' => [] + 'dependents' => [], + 'throttles' => [], } attributes = defaults.merge(Qless.stringify_hash_keys(attributes)) attributes['data'] = JSON.dump(attributes['data']) @@ -103,7 +104,7 @@ def self.middlewares_on(job_klass) def initialize(client, atts) super(client, atts.fetch('jid')) %w{jid data priority tags state tracked - failure dependencies dependents}.each do |att| + failure dependencies dependents throttles}.each do |att| instance_variable_set("@#{att}".to_sym, atts.fetch(att)) end diff --git a/lib/qless/queue.rb b/lib/qless/queue.rb index fb2a27e6..1e3682e0 100644 --- a/lib/qless/queue.rb +++ b/lib/qless/queue.rb @@ -94,7 +94,8 @@ def unpause # => delay (int) def put(klass, data, opts = {}) opts = job_options(klass, data, opts) - args = [ + @client.call( + 'put', worker_name, @name, (opts[:jid] || Qless.generate_jid), klass.is_a?(String) ? 
klass : klass.name, @@ -108,12 +109,9 @@ def put(klass, data, opts = {}) opts.fetch(:retries, 5), 'depends', JSON.generate(opts.fetch(:depends, [])), - ] - - throttle = opts.fetch(:throttle, @name) - args.concat(['throttle', throttle]) if throttle - - @client.call('put', *args) + 'throttles', + JSON.generate(opts.fetch(:throttles, [])), + ) end # Make a recurring job in this queue diff --git a/spec/integration/job_spec.rb b/spec/integration/job_spec.rb index 111b4314..227117af 100644 --- a/spec/integration/job_spec.rb +++ b/spec/integration/job_spec.rb @@ -19,7 +19,7 @@ class NoPerformJob; end end it 'has all the attributes we would expect' do - queue.put('Foo', { whiz: 'bang' }, jid: 'jid', tags: ['foo'], retries: 3) + queue.put('Foo', { whiz: 'bang' }, jid: 'jid', tags: ['foo'], retries: 3, throttles: ['fizz', 'buzz']) job = client.jobs['jid'] expected = { jid: 'jid', @@ -34,6 +34,7 @@ class NoPerformJob; end retries_left: 3, dependencies: [], original_retries: 3, + throttles: ['fizz', 'buzz', 'ql:q:foo'], } expected.each do |key, value| expect(job.send(key)).to eq(value) From e297e55bbd77a5f5ac5777e590d6274fc6e8ae10 Mon Sep 17 00:00:00 2001 From: Danny Guinther Date: Wed, 12 Mar 2014 11:31:06 -0400 Subject: [PATCH 09/83] Update lua scripts with queue throttling count --- lib/qless/lua/qless-lib.lua | 75 +++++++++++++++++++--------------- lib/qless/lua/qless.lua | 63 ++++++++++++++++------------ spec/integration/queue_spec.rb | 3 +- 3 files changed, 81 insertions(+), 60 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua index 99efb7b1..6509c690 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: 5d86598c8bf169b3e0ae6acec4214b845e2604d7 +-- Current SHA: 32a1408dd3ead382d492471e3f987a8d8d54fab6 -- This is a generated file ------------------------------------------------------------------------------- -- Forward declarations to make everything happy @@ -84,7 +84,7 @@ function Qless.throttle(tid) -- set of jids which have acquired a lock on this throttle. throttle.locks = { - count = function() + length = function() return (redis.call('zcard', QlessThrottle.ns .. tid .. '-locks') or 0) end, members = function() return redis.call('zrange', QlessThrottle.ns .. tid .. '-locks', 0, -1) @@ -103,7 +103,7 @@ function Qless.throttle(tid) -- set of jids waiting on this throttle to become available. throttle.pending = { - count = function() + length = function() return (redis.call('zcard', QlessThrottle.ns .. tid .. '-pending') or 0) end, members = function() return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', 0, -1) @@ -1451,18 +1451,21 @@ function Qless.queue(name) end } - -- Access to our throttled jobs + + -- Access to the queue level throttled jobs. + -- We delegate down to a throttle here for the general queue methods. + local queue_throttle = Qless.throttle(QlessQueue.ns .. name) queue.throttled = { peek = function(now, offset, count) - return redis.call('zrange', queue:prefix('throttled'), offset, offset + count - 1) + return queue_throttle.pending.peek(offset, count) end, add = function(now, jid) - redis.call('zadd', queue:prefix('throttled'), now, jid) + return queue_throttle.pending.add(jid) end, remove = function(...) 
if #arg > 0 then - return redis.call('zrem', queue:prefix('throttled'), unpack(arg)) + return queue_throttle.pending.remove(unpack(arg)) end end, length = function() - return redis.call('zcard', queue:prefix('throttled')) + return queue_throttle.pending.length() end } @@ -1675,6 +1678,17 @@ function QlessQueue:pop(now, worker, count) redis.call('zadd', 'ql:workers', now, worker) local dead_jids = self:invalidate_locks(now, count) or {} + local popped = {} + + for index, jid in ipairs(dead_jids) do + self:pop_job(now, worker, Qless.job(jid)) + table.insert(popped, jid) + end + + if not Qless.throttle(QlessQueue.ns .. self.name):available() then + return popped + end + -- Now we've checked __all__ the locks for this queue the could -- have expired, and are no more than the number requested. @@ -1688,13 +1702,15 @@ function QlessQueue:pop(now, worker, count) -- unit of work. self:check_scheduled(now, count - #dead_jids) + -- If we still need values in order to meet the demand, check our throttled + -- jobs. This has the side benefit of naturally updating other throttles + -- on the jobs checked. + self:check_throttled(now, count - #dead_jids) + -- With these in place, we can expand this list of jids based on the work -- queue itself and the priorities therein local jids = self.work.peek(count - #dead_jids) or {} - local queue_throttle = Qless.throttle(QlessQueue.ns .. self.name) - - local popped = {} for index, jid in ipairs(jids) do local job = Qless.job(jid) if job:acquire_throttles(now) then @@ -1705,16 +1721,9 @@ function QlessQueue:pop(now, worker, count) end end - -- If we are returning any jobs, then remove popped jobs from - -- work queue - self.work.remove(unpack(popped)) - - -- Process dead jids after removing newly popped jids from work queue - -- This changes the order of returned jids - for index, jid in ipairs(dead_jids) do - self:pop_job(now, worker, Qless.job(jid)) - table.insert(popped, jid) - end + -- All jobs should have acquired locks or be throttled, + -- ergo, remove all jids from work queue + self.work.remove(unpack(jids)) return popped end @@ -2225,6 +2234,15 @@ function QlessQueue:check_scheduled(now, count) end end +function QlessQueue:check_throttled(now, count) + local throttled = self.throttled.peek(now, 0, count - 1) + for _, jid in ipairs(throttled) do + local priority = tonumber(redis.call('hget', QlessJob.ns .. jid, 'priority') or 0) + self.work.add(now, priority, jid) + redis.call('hset', QlessJob.ns .. jid, 'state', 'waiting') + end +end + -- Check for and invalidate any locks that have been lost. Returns the -- list of jids that have been invalidated function QlessQueue:invalidate_locks(now, count) @@ -2387,6 +2405,7 @@ function QlessQueue.counts(now, name) waiting = queue.work.length(), stalled = stalled, running = queue.locks.length() - stalled, + throttled = queue.throttled.length(), scheduled = queue.scheduled.length(), depends = queue.depends.length(), recurring = queue.recurring.length(), @@ -2639,6 +2658,7 @@ end function QlessThrottle:acquire(jid) if self:available() then redis.call('set', 'printline', jid .. ' acquired the lock for ' .. self.id) + self.pending.remove(jid) self.locks.add(1, jid) return true else @@ -2668,15 +2688,6 @@ end function QlessThrottle:release(now, jid) redis.call('set', 'printline', jid .. ' is releasing lock on ' .. self.id) self.locks.remove(jid) - - redis.call('set', 'printline', 'retrieving next job from pending on ' .. 
self.id) - local next_jid = unpack(self:pending_pop(0, 0)) - if next_jid then - local job = Qless.job(next_jid):data() - local queue_obj = Qless.queue(job.queue) - queue_obj.throttled.remove(job.jid) - queue_obj.work.add(now, job.priority, job.jid) - end end function QlessThrottle:lock_pop(min, max) @@ -2695,6 +2706,6 @@ end -- Returns true if the throttle has locks available, false otherwise. function QlessThrottle:available() - redis.call('set', 'printline', 'available ' .. self.maximum .. ' == 0 or ' .. self.locks.count() .. ' < ' .. self.maximum) - return self.maximum == 0 or self.locks.count() < self.maximum + redis.call('set', 'printline', 'available ' .. self.maximum .. ' == 0 or ' .. self.locks.length() .. ' < ' .. self.maximum) + return self.maximum == 0 or self.locks.length() < self.maximum end diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua index ad2ffa51..226e776f 100644 --- a/lib/qless/lua/qless.lua +++ b/lib/qless/lua/qless.lua @@ -1,4 +1,4 @@ --- Current SHA: 5d86598c8bf169b3e0ae6acec4214b845e2604d7 +-- Current SHA: 32a1408dd3ead382d492471e3f987a8d8d54fab6 -- This is a generated file local Qless = { ns = 'ql:' @@ -67,7 +67,7 @@ function Qless.throttle(tid) setmetatable(throttle, QlessThrottle) throttle.locks = { - count = function() + length = function() return (redis.call('zcard', QlessThrottle.ns .. tid .. '-locks') or 0) end, members = function() return redis.call('zrange', QlessThrottle.ns .. tid .. '-locks', 0, -1) @@ -85,7 +85,7 @@ function Qless.throttle(tid) } throttle.pending = { - count = function() + length = function() return (redis.call('zcard', QlessThrottle.ns .. tid .. '-pending') or 0) end, members = function() return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', 0, -1) @@ -1096,17 +1096,19 @@ function Qless.queue(name) end } + + local queue_throttle = Qless.throttle(QlessQueue.ns .. name) queue.throttled = { peek = function(now, offset, count) - return redis.call('zrange', queue:prefix('throttled'), offset, offset + count - 1) + return queue_throttle.pending.peek(offset, count) end, add = function(now, jid) - redis.call('zadd', queue:prefix('throttled'), now, jid) + return queue_throttle.pending.add(jid) end, remove = function(...) if #arg > 0 then - return redis.call('zrem', queue:prefix('throttled'), unpack(arg)) + return queue_throttle.pending.remove(unpack(arg)) end end, length = function() - return redis.call('zcard', queue:prefix('throttled')) + return queue_throttle.pending.length() end } @@ -1251,16 +1253,26 @@ function QlessQueue:pop(now, worker, count) redis.call('zadd', 'ql:workers', now, worker) local dead_jids = self:invalidate_locks(now, count) or {} + local popped = {} + + for index, jid in ipairs(dead_jids) do + self:pop_job(now, worker, Qless.job(jid)) + table.insert(popped, jid) + end + + if not Qless.throttle(QlessQueue.ns .. self.name):available() then + return popped + end + self:check_recurring(now, count - #dead_jids) self:check_scheduled(now, count - #dead_jids) - local jids = self.work.peek(count - #dead_jids) or {} + self:check_throttled(now, count - #dead_jids) - local queue_throttle = Qless.throttle(QlessQueue.ns .. 
self.name) + local jids = self.work.peek(count - #dead_jids) or {} - local popped = {} for index, jid in ipairs(jids) do local job = Qless.job(jid) if job:acquire_throttles(now) then @@ -1271,12 +1283,7 @@ function QlessQueue:pop(now, worker, count) end end - self.work.remove(unpack(popped)) - - for index, jid in ipairs(dead_jids) do - self:pop_job(now, worker, Qless.job(jid)) - table.insert(popped, jid) - end + self.work.remove(unpack(jids)) return popped end @@ -1673,6 +1680,15 @@ function QlessQueue:check_scheduled(now, count) end end +function QlessQueue:check_throttled(now, count) + local throttled = self.throttled.peek(now, 0, count - 1) + for _, jid in ipairs(throttled) do + local priority = tonumber(redis.call('hget', QlessJob.ns .. jid, 'priority') or 0) + self.work.add(now, priority, jid) + redis.call('hset', QlessJob.ns .. jid, 'state', 'waiting') + end +end + function QlessQueue:invalidate_locks(now, count) local jids = {} for index, jid in ipairs(self.locks.expired(now, 0, count)) do @@ -1788,6 +1804,7 @@ function QlessQueue.counts(now, name) waiting = queue.work.length(), stalled = stalled, running = queue.locks.length() - stalled, + throttled = queue.throttled.length(), scheduled = queue.scheduled.length(), depends = queue.depends.length(), recurring = queue.recurring.length(), @@ -1970,6 +1987,7 @@ end function QlessThrottle:acquire(jid) if self:available() then redis.call('set', 'printline', jid .. ' acquired the lock for ' .. self.id) + self.pending.remove(jid) self.locks.add(1, jid) return true else @@ -1987,15 +2005,6 @@ end function QlessThrottle:release(now, jid) redis.call('set', 'printline', jid .. ' is releasing lock on ' .. self.id) self.locks.remove(jid) - - redis.call('set', 'printline', 'retrieving next job from pending on ' .. self.id) - local next_jid = unpack(self:pending_pop(0, 0)) - if next_jid then - local job = Qless.job(next_jid):data() - local queue_obj = Qless.queue(job.queue) - queue_obj.throttled.remove(job.jid) - queue_obj.work.add(now, job.priority, job.jid) - end end function QlessThrottle:lock_pop(min, max) @@ -2013,8 +2022,8 @@ function QlessThrottle:pending_pop(min, max) end function QlessThrottle:available() - redis.call('set', 'printline', 'available ' .. self.maximum .. ' == 0 or ' .. self.locks.count() .. ' < ' .. self.maximum) - return self.maximum == 0 or self.locks.count() < self.maximum + redis.call('set', 'printline', 'available ' .. self.maximum .. ' == 0 or ' .. self.locks.length() .. ' < ' .. 
self.maximum) + return self.maximum == 0 or self.locks.length() < self.maximum end local QlessAPI = {} diff --git a/spec/integration/queue_spec.rb b/spec/integration/queue_spec.rb index b2316348..b86482ed 100644 --- a/spec/integration/queue_spec.rb +++ b/spec/integration/queue_spec.rb @@ -27,7 +27,8 @@ module Qless 'scheduled' => 0, 'running' => 0, 'stalled' => 0, - 'waiting' => 1 + 'waiting' => 1, + 'throttled' => 0, }) end From ab69d8ab1e51db05743497ae30657713d24cd639 Mon Sep 17 00:00:00 2001 From: Danny Guinther Date: Wed, 12 Mar 2014 14:53:41 -0400 Subject: [PATCH 10/83] Fix throttle integration --- lib/qless/lua/qless-lib.lua | 8 ++++---- lib/qless/lua/qless.lua | 24 ++++++++++++++++-------- lib/qless/queue.rb | 4 ++-- lib/qless/server/views/overview.erb | 2 ++ 4 files changed, 24 insertions(+), 14 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua index 6509c690..16f153b6 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: 32a1408dd3ead382d492471e3f987a8d8d54fab6 +-- Current SHA: a70362b19f849af843d8685d6519db3595f553f2 -- This is a generated file ------------------------------------------------------------------------------- -- Forward declarations to make everything happy @@ -27,7 +27,7 @@ QlessJob.__index = QlessJob -- throttle forward declaration local QlessThrottle = { - ns = Qless.ns .. 't:' + ns = Qless.ns .. 'th:' } QlessThrottle.__index = QlessThrottle @@ -2629,9 +2629,9 @@ end -- Retrieve the data fro a throttled resource function QlessThrottle:data() local throttle = redis.call('hmget', QlessThrottle.ns .. self.id, 'id', 'maximum') - -- Return nil if we haven't found it + -- Return default if it doesn't exist if not throttle[1] then - return nil + return {id = self.id, maximum = 0} end local data = { diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua index 226e776f..28369c2f 100644 --- a/lib/qless/lua/qless.lua +++ b/lib/qless/lua/qless.lua @@ -1,4 +1,4 @@ --- Current SHA: 32a1408dd3ead382d492471e3f987a8d8d54fab6 +-- Current SHA: a70362b19f849af843d8685d6519db3595f553f2 -- This is a generated file local Qless = { ns = 'ql:' @@ -20,7 +20,7 @@ local QlessJob = { QlessJob.__index = QlessJob local QlessThrottle = { - ns = Qless.ns .. 't:' + ns = Qless.ns .. 'th:' } QlessThrottle.__index = QlessThrottle @@ -1966,7 +1966,7 @@ end function QlessThrottle:data() local throttle = redis.call('hmget', QlessThrottle.ns .. self.id, 'id', 'maximum') if not throttle[1] then - return nil + return {id = self.id, maximum = 0} end local data = { @@ -2208,6 +2208,18 @@ QlessAPI['queue.forget'] = function(now, ...) QlessQueue.deregister(unpack(arg)) end +QlessAPI['queue.throttle.get'] = function(now, queue) + local data = Qless.throttle(QlessQueue.ns .. queue):data() + if not data then + return nil + end + return cjson.encode(data) +end + +QlessAPI['queue.throttle.set'] = function(now, queue, max) + Qless.throttle(QlessQueue.ns .. 
queue):set({maximum = max}) +end + QlessAPI['throttle.set'] = function(now, tid, max) local data = { maximum = max @@ -2216,11 +2228,7 @@ QlessAPI['throttle.set'] = function(now, tid, max) end QlessAPI['throttle.get'] = function(now, tid) - local data = Qless.throttle(tid):data() - if not data then - return nil - end - return cjson.encode(data) + return cjson.encode(Qless.throttle(tid):data()) end QlessAPI['throttle.delete'] = function(now, tid) diff --git a/lib/qless/queue.rb b/lib/qless/queue.rb index 1e3682e0..178baeca 100644 --- a/lib/qless/queue.rb +++ b/lib/qless/queue.rb @@ -66,12 +66,12 @@ def heartbeat=(value) end def max_concurrency - value = get_config('max-concurrency') + value = JSON.parse(@client.call('queue.throttle.get', @name))['maximum'] value && Integer(value) end def max_concurrency=(value) - set_config 'max-concurrency', value + @client.call('queue.throttle.set', @name, value) end def paused? diff --git a/lib/qless/server/views/overview.erb b/lib/qless/server/views/overview.erb index 0d09d67d..0706e0c0 100644 --- a/lib/qless/server/views/overview.erb +++ b/lib/qless/server/views/overview.erb @@ -13,6 +13,7 @@ running waiting + throttled scheduled stalled depends @@ -42,6 +43,7 @@ <%= queue['running'] %> <%= queue['waiting'] %> + <%= queue['throttled'] %> <%= queue['scheduled'] %> <%= queue['stalled'] %> <%= queue['depends'] %> From a982b6d52540201b6ffc3d656942364d2b82ff5c Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Wed, 12 Mar 2014 16:37:04 -0400 Subject: [PATCH 11/83] test fixes --- lib/qless/lua/qless-lib.lua | 156 +++++++++++++++----------------- lib/qless/lua/qless.lua | 123 +++++++++++-------------- spec/integration/server_spec.rb | 1 + spec/unit/throttle_spec.rb | 8 +- 4 files changed, 130 insertions(+), 158 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua index 16f153b6..750e0ff0 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: a70362b19f849af843d8685d6519db3595f553f2 +-- Current SHA: fc332c90c61b3cb497d5afca2b745b8d243921fc -- This is a generated file ------------------------------------------------------------------------------- -- Forward declarations to make everything happy @@ -102,25 +102,25 @@ function Qless.throttle(tid) } -- set of jids waiting on this throttle to become available. - throttle.pending = { - length = function() - return (redis.call('zcard', QlessThrottle.ns .. tid .. '-pending') or 0) - end, members = function() - return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', 0, -1) - end, peek = function(min, max) - return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', min, max) - end, add = function(...) - if #arg > 0 then - redis.call('zadd', QlessThrottle.ns .. tid .. '-pending', unpack(arg)) - end - end, remove = function(...) - if #arg > 0 then - return redis.call('zrem', QlessThrottle.ns .. tid .. '-pending', unpack(arg)) - end - end, pop = function(min, max) - return redis.call('zremrangebyrank', QlessThrottle.ns .. tid .. '-pending', min, max) - end - } + -- throttle.pending = { + -- length = function() + -- return (redis.call('zcard', QlessThrottle.ns .. tid .. '-pending') or 0) + -- end, members = function() + -- return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', 0, -1) + -- end, peek = function(min, max) + -- return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', min, max) + -- end, add = function(...) + -- if #arg > 0 then + -- redis.call('zadd', QlessThrottle.ns .. tid .. 
'-pending', unpack(arg)) + -- end + -- end, remove = function(...) + -- if #arg > 0 then + -- return redis.call('zrem', QlessThrottle.ns .. tid .. '-pending', unpack(arg)) + -- end + -- end, pop = function(min, max) + -- return redis.call('zremrangebyrank', QlessThrottle.ns .. tid .. '-pending', min, max) + -- end + -- } return throttle end @@ -209,6 +209,8 @@ function Qless.jobs(now, state, ...) return queue.locks.peek(now, offset, count) elseif state == 'stalled' then return queue.locks.expired(now, offset, count) + elseif state == 'throttled' then + return queue.throttled.peek(now, offset, count) elseif state == 'scheduled' then queue:check_scheduled(now, queue.scheduled.length()) return queue.scheduled.peek(now, offset, count) @@ -1356,21 +1358,28 @@ end function QlessJob:acquire_throttles(now) local throttles = cjson.decode(redis.call('hget', QlessJob.ns .. self.jid, 'throttles')) - local acquired_all = true - local acquired_throttles = {} + local all_locks_available = true + + redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - checking availability') for _, tid in ipairs(throttles) do - acquired_all = acquired_all and Qless.throttle(tid):acquire(self.jid) - table.insert(acquired_throttles, tid) + redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - checking availability for ' .. tid) + all_locks_available = all_locks_available and Qless.throttle(tid):available() + redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - throttle available ' .. tid) end - if not acquired_all then - redis.call('set', 'printline', 'rolling back acquired locks') - for _, tid in ipairs(acquired_throttles) do - Qless.throttle(tid):rollback_acquire(self.jid) - end + redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - short circuit if we can not acquire locks ' .. tostring(all_locks_available)) + if not all_locks_available then + return false end - return acquired_all + redis.call('set', 'printline', 'QlessJob:acquire_throttles - grabbing locks') + redis.call('set', 'printline', 'QlessJob:acquire_throttles - inside if') + for _, tid in ipairs(throttles) do + redis.call('set', 'printline', 'QlessJob:acquire_throttles - invoking QlessThrottle:acquire') + Qless.throttle(tid):acquire(self.jid) + end + redis.call('set', 'printline', 'QlessJob:acquire_throttles - successfully completed') + return true end ------------------------------------------------------------------------------- -- Queue class @@ -1453,19 +1462,21 @@ function Qless.queue(name) -- Access to the queue level throttled jobs. - -- We delegate down to a throttle here for the general queue methods. - local queue_throttle = Qless.throttle(QlessQueue.ns .. name) queue.throttled = { - peek = function(now, offset, count) - return queue_throttle.pending.peek(offset, count) - end, add = function(now, jid) - return queue_throttle.pending.add(jid) + length = function() + return (redis.call('zcard', queue:prefix('throttled')) or 0) + end, peek = function(now, min, max) + return redis.call('zrange', queue:prefix('throttled'), min, max) + end, add = function(...) + if #arg > 0 then + redis.call('zadd', queue:prefix('throttled'), unpack(arg)) + end end, remove = function(...) 
if #arg > 0 then - return queue_throttle.pending.remove(unpack(arg)) + return redis.call('zrem', queue:prefix('throttled'), unpack(arg)) end - end, length = function() - return queue_throttle.pending.length() + end, pop = function(min, max) + return redis.call('zremrangebyrank', queue:prefix('throttled'), min, max) end } @@ -1685,6 +1696,7 @@ function QlessQueue:pop(now, worker, count) table.insert(popped, jid) end + -- if queue is at max capacity don't pop any further jobs. if not Qless.throttle(QlessQueue.ns .. self.name):available() then return popped end @@ -1710,14 +1722,15 @@ function QlessQueue:pop(now, worker, count) -- With these in place, we can expand this list of jids based on the work -- queue itself and the priorities therein local jids = self.work.peek(count - #dead_jids) or {} - + redis.call('set', 'printline', 'Pop - before acquire') for index, jid in ipairs(jids) do local job = Qless.job(jid) if job:acquire_throttles(now) then self:pop_job(now, worker, job) table.insert(popped, jid) else - job:history(now, 'throttled', {worker = worker}) + redis.call('set', 'printline', 'QlessQueue:pop - throttling ' .. job.jid) + self:throttle(now, job) end end @@ -1728,6 +1741,19 @@ function QlessQueue:pop(now, worker, count) return popped end +-- Throttle a job +function QlessQueue:throttle(now, job) + self.throttled.add(now, job.jid) + redis.call('set', 'printline', 'QlessQueue:throttle - get state') + local state = unpack(job:data('state')) + redis.call('set', 'printline', 'QlessQueue:throttle - check state') + if state ~= 'throttled' then + redis.call('set', 'printline', 'QlessQueue:throttle - update job') + job:update({state = 'throttled'}) + job:history(now, 'throttled', {queue = self.name}) + end +end + function QlessQueue:pop_job(now, worker, job) local state local jid = job.jid @@ -2239,6 +2265,7 @@ function QlessQueue:check_throttled(now, count) for _, jid in ipairs(throttled) do local priority = tonumber(redis.call('hget', QlessJob.ns .. jid, 'priority') or 0) self.work.add(now, priority, jid) + self.throttled.remove(jid) redis.call('hset', QlessJob.ns .. jid, 'state', 'waiting') end end @@ -2652,58 +2679,25 @@ function QlessThrottle:unset() end -- Acquire a throttled resource for a job. --- if the resource is at full capacity then add it to the pending --- set. --- Returns true of the job acquired the resource. +-- Returns true of the job acquired the resource, false otherwise function QlessThrottle:acquire(jid) - if self:available() then - redis.call('set', 'printline', jid .. ' acquired the lock for ' .. self.id) - self.pending.remove(jid) - self.locks.add(1, jid) - return true - else - redis.call('set', 'printline', jid .. ' failed acquiring the lock for ' .. self.id .. ' marked as pending') - self.pending.add(1, jid) + redis.call('set', 'printline', 'QlessThrottle:acquire - checking availability') + if not self:available() then + redis.call('set', 'printline', jid .. ' failed to acquire lock on ' .. self.id) return false end -end --- Rolls back an attempted lock acquisition. --- Since jobs can acquire multiple locks and the acquire --- behavior is to either add them to the lock or pend them --- this method handles the rolling back an acquired lock --- on a job that failed to acquire all of its locks. --- without placing another pending job into the queue. -function QlessThrottle:rollback_acquire(jid) - self.locks.remove(jid) - self.pending.add(1, jid) + redis.call('set', 'printline', jid .. ' acquired a lock on ' .. 
self.id) + self.locks.add(1, jid) + return true end -- Release a throttled resource. --- This will take a currently pending job --- and attempt to acquire a lock. --- If it succeeds at acquiring a lock then --- the job will be moved from the throttled --- queue into the work queue function QlessThrottle:release(now, jid) redis.call('set', 'printline', jid .. ' is releasing lock on ' .. self.id) self.locks.remove(jid) end -function QlessThrottle:lock_pop(min, max) - local lock = Qless.throttle(self.id).locks - local jid = lock.peek(min,max) - lock.pop(min,max) - return jid -end - -function QlessThrottle:pending_pop(min, max) - local pending = Qless.throttle(self.id).pending - local jids = pending.peek(min,max) - pending.pop(min,max) - return jids -end - -- Returns true if the throttle has locks available, false otherwise. function QlessThrottle:available() redis.call('set', 'printline', 'available ' .. self.maximum .. ' == 0 or ' .. self.locks.length() .. ' < ' .. self.maximum) diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua index 28369c2f..e837b8b3 100644 --- a/lib/qless/lua/qless.lua +++ b/lib/qless/lua/qless.lua @@ -1,4 +1,4 @@ --- Current SHA: a70362b19f849af843d8685d6519db3595f553f2 +-- Current SHA: fc332c90c61b3cb497d5afca2b745b8d243921fc -- This is a generated file local Qless = { ns = 'ql:' @@ -84,25 +84,6 @@ function Qless.throttle(tid) end } - throttle.pending = { - length = function() - return (redis.call('zcard', QlessThrottle.ns .. tid .. '-pending') or 0) - end, members = function() - return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', 0, -1) - end, peek = function(min, max) - return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', min, max) - end, add = function(...) - if #arg > 0 then - redis.call('zadd', QlessThrottle.ns .. tid .. '-pending', unpack(arg)) - end - end, remove = function(...) - if #arg > 0 then - return redis.call('zrem', QlessThrottle.ns .. tid .. '-pending', unpack(arg)) - end - end, pop = function(min, max) - return redis.call('zremrangebyrank', QlessThrottle.ns .. tid .. '-pending', min, max) - end - } return throttle end @@ -148,6 +129,8 @@ function Qless.jobs(now, state, ...) return queue.locks.peek(now, offset, count) elseif state == 'stalled' then return queue.locks.expired(now, offset, count) + elseif state == 'throttled' then + return queue.throttled.peek(now, offset, count) elseif state == 'scheduled' then queue:check_scheduled(now, queue.scheduled.length()) return queue.scheduled.peek(now, offset, count) @@ -1010,21 +993,28 @@ end function QlessJob:acquire_throttles(now) local throttles = cjson.decode(redis.call('hget', QlessJob.ns .. self.jid, 'throttles')) - local acquired_all = true - local acquired_throttles = {} + local all_locks_available = true + + redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - checking availability') for _, tid in ipairs(throttles) do - acquired_all = acquired_all and Qless.throttle(tid):acquire(self.jid) - table.insert(acquired_throttles, tid) + redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - checking availability for ' .. tid) + all_locks_available = all_locks_available and Qless.throttle(tid):available() + redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - throttle available ' .. 
tid) end - if not acquired_all then - redis.call('set', 'printline', 'rolling back acquired locks') - for _, tid in ipairs(acquired_throttles) do - Qless.throttle(tid):rollback_acquire(self.jid) - end + redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - short circuit if we can not acquire locks ' .. tostring(all_locks_available)) + if not all_locks_available then + return false end - return acquired_all + redis.call('set', 'printline', 'QlessJob:acquire_throttles - grabbing locks') + redis.call('set', 'printline', 'QlessJob:acquire_throttles - inside if') + for _, tid in ipairs(throttles) do + redis.call('set', 'printline', 'QlessJob:acquire_throttles - invoking QlessThrottle:acquire') + Qless.throttle(tid):acquire(self.jid) + end + redis.call('set', 'printline', 'QlessJob:acquire_throttles - successfully completed') + return true end function Qless.queue(name) assert(name, 'Queue(): no queue name provided') @@ -1097,18 +1087,21 @@ function Qless.queue(name) } - local queue_throttle = Qless.throttle(QlessQueue.ns .. name) queue.throttled = { - peek = function(now, offset, count) - return queue_throttle.pending.peek(offset, count) - end, add = function(now, jid) - return queue_throttle.pending.add(jid) + length = function() + return (redis.call('zcard', queue:prefix('throttled')) or 0) + end, peek = function(now, min, max) + return redis.call('zrange', queue:prefix('throttled'), min, max) + end, add = function(...) + if #arg > 0 then + redis.call('zadd', queue:prefix('throttled'), unpack(arg)) + end end, remove = function(...) if #arg > 0 then - return queue_throttle.pending.remove(unpack(arg)) + return redis.call('zrem', queue:prefix('throttled'), unpack(arg)) end - end, length = function() - return queue_throttle.pending.length() + end, pop = function(min, max) + return redis.call('zremrangebyrank', queue:prefix('throttled'), min, max) end } @@ -1272,14 +1265,15 @@ function QlessQueue:pop(now, worker, count) self:check_throttled(now, count - #dead_jids) local jids = self.work.peek(count - #dead_jids) or {} - + redis.call('set', 'printline', 'Pop - before acquire') for index, jid in ipairs(jids) do local job = Qless.job(jid) if job:acquire_throttles(now) then self:pop_job(now, worker, job) table.insert(popped, jid) else - job:history(now, 'throttled', {worker = worker}) + redis.call('set', 'printline', 'QlessQueue:pop - throttling ' .. job.jid) + self:throttle(now, job) end end @@ -1288,6 +1282,18 @@ function QlessQueue:pop(now, worker, count) return popped end +function QlessQueue:throttle(now, job) + self.throttled.add(now, job.jid) + redis.call('set', 'printline', 'QlessQueue:throttle - get state') + local state = unpack(job:data('state')) + redis.call('set', 'printline', 'QlessQueue:throttle - check state') + if state ~= 'throttled' then + redis.call('set', 'printline', 'QlessQueue:throttle - update job') + job:update({state = 'throttled'}) + job:history(now, 'throttled', {queue = self.name}) + end +end + function QlessQueue:pop_job(now, worker, job) local state local jid = job.jid @@ -1685,6 +1691,7 @@ function QlessQueue:check_throttled(now, count) for _, jid in ipairs(throttled) do local priority = tonumber(redis.call('hget', QlessJob.ns .. jid, 'priority') or 0) self.work.add(now, priority, jid) + self.throttled.remove(jid) redis.call('hset', QlessJob.ns .. jid, 'state', 'waiting') end end @@ -1985,21 +1992,15 @@ function QlessThrottle:unset() end function QlessThrottle:acquire(jid) - if self:available() then - redis.call('set', 'printline', jid .. 
' acquired the lock for ' .. self.id) - self.pending.remove(jid) - self.locks.add(1, jid) - return true - else - redis.call('set', 'printline', jid .. ' failed acquiring the lock for ' .. self.id .. ' marked as pending') - self.pending.add(1, jid) + redis.call('set', 'printline', 'QlessThrottle:acquire - checking availability') + if not self:available() then + redis.call('set', 'printline', jid .. ' failed to acquire lock on ' .. self.id) return false end -end -function QlessThrottle:rollback_acquire(jid) - self.locks.remove(jid) - self.pending.add(1, jid) + redis.call('set', 'printline', jid .. ' acquired a lock on ' .. self.id) + self.locks.add(1, jid) + return true end function QlessThrottle:release(now, jid) @@ -2007,20 +2008,6 @@ function QlessThrottle:release(now, jid) self.locks.remove(jid) end -function QlessThrottle:lock_pop(min, max) - local lock = Qless.throttle(self.id).locks - local jid = lock.peek(min,max) - lock.pop(min,max) - return jid -end - -function QlessThrottle:pending_pop(min, max) - local pending = Qless.throttle(self.id).pending - local jids = pending.peek(min,max) - pending.pop(min,max) - return jids -end - function QlessThrottle:available() redis.call('set', 'printline', 'available ' .. self.maximum .. ' == 0 or ' .. self.locks.length() .. ' < ' .. self.maximum) return self.maximum == 0 or self.locks.length() < self.maximum @@ -2239,10 +2226,6 @@ QlessAPI['throttle.locks'] = function(now, tid) return Qless.throttle(tid).locks.members() end -QlessAPI['throttle.pending'] = function(now, tid) - return Qless.throttle(tid).pending.members() -end - if #KEYS > 0 then error('No Keys should be provided') end local command_name = assert(table.remove(ARGV, 1), 'Must provide a command') diff --git a/spec/integration/server_spec.rb b/spec/integration/server_spec.rb index 06f52419..7caed7a5 100644 --- a/spec/integration/server_spec.rb +++ b/spec/integration/server_spec.rb @@ -824,6 +824,7 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) 'depends' => 0, 'stalled' => 0, 'scheduled' => 0, + 'throttled' => 0, 'paused' => false } JSON.parse(last_response.body).should eq([response]) diff --git a/spec/unit/throttle_spec.rb b/spec/unit/throttle_spec.rb index 123eda46..4dc26d5c 100644 --- a/spec/unit/throttle_spec.rb +++ b/spec/unit/throttle_spec.rb @@ -27,7 +27,7 @@ module Qless it "returns the set of locked jids" do t = Throttle.new('name', client) - Redis.current.zadd('ql:t:name-locks', [[1, 1], [1, 2], [1, 3]]) + Redis.current.zadd('ql:th:name-locks', [[1, 1], [1, 2], [1, 3]]) t.locks.should eq(["1", "2", "3"]) end @@ -37,12 +37,6 @@ module Qless t.maximum.should eq(5) end - it "returns the set of pending jids" do - t = Throttle.new('name', client) - Redis.current.zadd('ql:t:name-pending', [[1, 1], [1, 2], [1, 3]]) - t.pending.should eq(["1", "2", "3"]) - end - it "handles throttle names as a String or Symbol" do t = Throttle.new('name', client) t.maximum = 5 From 69f79e681a6da30be97cf045c4b1a213b77f9225 Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Thu, 13 Mar 2014 07:05:36 -0400 Subject: [PATCH 12/83] new qless core scripts to fix throttling deadlock --- lib/qless/lua/qless-lib.lua | 105 +++++++++++++++++++----------------- lib/qless/lua/qless.lua | 81 +++++++++++++++++----------- 2 files changed, 106 insertions(+), 80 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua index 750e0ff0..bd500520 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: 
fc332c90c61b3cb497d5afca2b745b8d243921fc +-- Current SHA: 730c668fa06e6a7bee288528e2f6da72dbaf349c -- This is a generated file ------------------------------------------------------------------------------- -- Forward declarations to make everything happy @@ -101,26 +101,6 @@ function Qless.throttle(tid) end } - -- set of jids waiting on this throttle to become available. - -- throttle.pending = { - -- length = function() - -- return (redis.call('zcard', QlessThrottle.ns .. tid .. '-pending') or 0) - -- end, members = function() - -- return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', 0, -1) - -- end, peek = function(min, max) - -- return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', min, max) - -- end, add = function(...) - -- if #arg > 0 then - -- redis.call('zadd', QlessThrottle.ns .. tid .. '-pending', unpack(arg)) - -- end - -- end, remove = function(...) - -- if #arg > 0 then - -- return redis.call('zrem', QlessThrottle.ns .. tid .. '-pending', unpack(arg)) - -- end - -- end, pop = function(min, max) - -- return redis.call('zremrangebyrank', QlessThrottle.ns .. tid .. '-pending', min, max) - -- end - -- } return throttle end @@ -439,7 +419,7 @@ function Qless.cancel(now, ...) queue.depends.remove(jid) end - Qless.job(namespaced_jid):release_throttles(now) + Qless.job(namespaced_jid):throttles_release(now) -- We should probably go through all our dependencies and remove -- ourselves from the list of dependents @@ -678,7 +658,7 @@ function QlessJob:complete(now, worker, queue, data, ...) queue_obj.locks.remove(self.jid) queue_obj.scheduled.remove(self.jid) - self:release_throttles(now) + self:throttles_release(now) ---------------------------------------------------------- -- This is the massive stats update that we have to do @@ -947,7 +927,7 @@ function QlessJob:fail(now, worker, group, message, data) ['worker'] = worker })) - self:release_throttles(now) + self:throttles_release(now) -- Add this group of failure to the list of failures redis.call('sadd', 'ql:failures', group) @@ -1008,7 +988,7 @@ function QlessJob:retry(now, queue, worker, delay, group, message) Qless.queue(oldqueue).locks.remove(self.jid) -- Release the throttle for the job - self:release_throttles(now) + self:throttles_release(now) -- Remove this job from the worker that was previously working it redis.call('zrem', 'ql:w:' .. worker .. ':jobs', self.jid) @@ -1345,7 +1325,7 @@ function QlessJob:history(now, what, item) end end -function QlessJob:release_throttles(now) +function QlessJob:throttles_release(now) local throttles = redis.call('hget', QlessJob.ns .. self.jid, 'throttles') throttles = cjson.decode(throttles or '{}') @@ -1355,32 +1335,43 @@ function QlessJob:release_throttles(now) end end -function QlessJob:acquire_throttles(now) - local throttles = cjson.decode(redis.call('hget', QlessJob.ns .. self.jid, 'throttles')) - - local all_locks_available = true - - redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - checking availability') - for _, tid in ipairs(throttles) do - redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - checking availability for ' .. tid) - all_locks_available = all_locks_available and Qless.throttle(tid):available() - redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - throttle available ' .. tid) +function QlessJob:throttles_available() + redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. 
' - checking throttle availability') + for _, tid in ipairs(self:throttles()) do + redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. ' - checking availability for ' .. tid) + if not Qless.throttle(tid):available() then + redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. ' - throttle not available ' .. tid) + return false + end + redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. ' - throttle available ' .. tid) end - redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - short circuit if we can not acquire locks ' .. tostring(all_locks_available)) - if not all_locks_available then + return true +end + +function QlessJob:throttles_acquire(now) + if not self:throttles_available() then + redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - throttles not avaible') return false end - redis.call('set', 'printline', 'QlessJob:acquire_throttles - grabbing locks') - redis.call('set', 'printline', 'QlessJob:acquire_throttles - inside if') - for _, tid in ipairs(throttles) do - redis.call('set', 'printline', 'QlessJob:acquire_throttles - invoking QlessThrottle:acquire') + for _, tid in ipairs(self:throttles()) do + redis.call('set', 'printline', 'QlessJob:acquire_throttles - acquiring ' .. tid) Qless.throttle(tid):acquire(self.jid) end - redis.call('set', 'printline', 'QlessJob:acquire_throttles - successfully completed') + + redis.call('set', 'printline', 'QlessJob:acquire_throttles - throttles avaible') return true end + +function QlessJob:throttles() + -- memoize throttles for the job. + if not self._throttles then + self._throttles = cjson.decode(redis.call('hget', QlessJob.ns .. self.jid, 'throttles')) + end + + return self._throttles +end ------------------------------------------------------------------------------- -- Queue class ------------------------------------------------------------------------------- @@ -1701,6 +1692,7 @@ function QlessQueue:pop(now, worker, count) return popped end + redis.call('set', 'printline', 'dead_jids : ' .. tostring(#dead_jids)) -- Now we've checked __all__ the locks for this queue the could -- have expired, and are no more than the number requested. @@ -1722,10 +1714,10 @@ function QlessQueue:pop(now, worker, count) -- With these in place, we can expand this list of jids based on the work -- queue itself and the priorities therein local jids = self.work.peek(count - #dead_jids) or {} - redis.call('set', 'printline', 'Pop - before acquire') + for index, jid in ipairs(jids) do local job = Qless.job(jid) - if job:acquire_throttles(now) then + if job:throttles_acquire(now) then self:pop_job(now, worker, job) table.insert(popped, jid) else @@ -2261,12 +2253,25 @@ function QlessQueue:check_scheduled(now, count) end function QlessQueue:check_throttled(now, count) + if count == 0 then + redis.call('set', 'printline', 'count 0 not popping any throttled jobs') + return + end + + -- minus 1 since its inclusive local throttled = self.throttled.peek(now, 0, count - 1) + redis.call('set', 'printline', 'throttling the following jobs ' .. cjson.encode(throttled)) for _, jid in ipairs(throttled) do - local priority = tonumber(redis.call('hget', QlessJob.ns .. jid, 'priority') or 0) - self.work.add(now, priority, jid) self.throttled.remove(jid) - redis.call('hset', QlessJob.ns .. jid, 'state', 'waiting') + if Qless.job(jid):throttles_available() then + local priority = tonumber(redis.call('hget', QlessJob.ns .. 
jid, 'priority') or 0) + self.work.add(now, priority, jid) + self.throttled.remove(jid) + else + -- shift jid to end of throttled jobs + -- use current time to make sure it gets added to the end of the sorted set. + self.throttled.add(now, jid) + end end end @@ -2354,7 +2359,7 @@ function QlessQueue:invalidate_locks(now, count) local queue = job_data['queue'] local group = 'failed-retries-' .. queue - job:release_throttles(now) + job:throttles_release(now) job:history(now, 'failed', {group = group}) redis.call('hmset', QlessJob.ns .. jid, 'state', 'failed', @@ -2700,6 +2705,6 @@ end -- Returns true if the throttle has locks available, false otherwise. function QlessThrottle:available() - redis.call('set', 'printline', 'available ' .. self.maximum .. ' == 0 or ' .. self.locks.length() .. ' < ' .. self.maximum) + redis.call('set', 'printline', self.id .. ' available ' .. self.maximum .. ' == 0 or ' .. self.locks.length() .. ' < ' .. self.maximum) return self.maximum == 0 or self.locks.length() < self.maximum end diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua index e837b8b3..3f849e12 100644 --- a/lib/qless/lua/qless.lua +++ b/lib/qless/lua/qless.lua @@ -1,4 +1,4 @@ --- Current SHA: fc332c90c61b3cb497d5afca2b745b8d243921fc +-- Current SHA: 730c668fa06e6a7bee288528e2f6da72dbaf349c -- This is a generated file local Qless = { ns = 'ql:' @@ -289,7 +289,7 @@ function Qless.cancel(now, ...) queue.depends.remove(jid) end - Qless.job(namespaced_jid):release_throttles(now) + Qless.job(namespaced_jid):throttles_release(now) for i, j in ipairs(redis.call( 'smembers', QlessJob.ns .. jid .. '-dependencies')) do @@ -471,7 +471,7 @@ function QlessJob:complete(now, worker, queue, data, ...) queue_obj.locks.remove(self.jid) queue_obj.scheduled.remove(self.jid) - self:release_throttles(now) + self:throttles_release(now) local time = tonumber( redis.call('hget', QlessJob.ns .. self.jid, 'time') or now) @@ -675,7 +675,7 @@ function QlessJob:fail(now, worker, group, message, data) ['worker'] = worker })) - self:release_throttles(now) + self:throttles_release(now) redis.call('sadd', 'ql:failures', group) redis.call('lpush', 'ql:f:' .. group, self.jid) @@ -708,7 +708,7 @@ function QlessJob:retry(now, queue, worker, delay, group, message) Qless.queue(oldqueue).locks.remove(self.jid) - self:release_throttles(now) + self:throttles_release(now) redis.call('zrem', 'ql:w:' .. worker .. ':jobs', self.jid) @@ -980,7 +980,7 @@ function QlessJob:history(now, what, item) end end -function QlessJob:release_throttles(now) +function QlessJob:throttles_release(now) local throttles = redis.call('hget', QlessJob.ns .. self.jid, 'throttles') throttles = cjson.decode(throttles or '{}') @@ -990,32 +990,42 @@ function QlessJob:release_throttles(now) end end -function QlessJob:acquire_throttles(now) - local throttles = cjson.decode(redis.call('hget', QlessJob.ns .. self.jid, 'throttles')) - - local all_locks_available = true - - redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - checking availability') - for _, tid in ipairs(throttles) do - redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - checking availability for ' .. tid) - all_locks_available = all_locks_available and Qless.throttle(tid):available() - redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - throttle available ' .. tid) +function QlessJob:throttles_available() + redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. 
' - checking throttle availability') + for _, tid in ipairs(self:throttles()) do + redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. ' - checking availability for ' .. tid) + if not Qless.throttle(tid):available() then + redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. ' - throttle not available ' .. tid) + return false + end + redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. ' - throttle available ' .. tid) end - redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - short circuit if we can not acquire locks ' .. tostring(all_locks_available)) - if not all_locks_available then + return true +end + +function QlessJob:throttles_acquire(now) + if not self:throttles_available() then + redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - throttles not avaible') return false end - redis.call('set', 'printline', 'QlessJob:acquire_throttles - grabbing locks') - redis.call('set', 'printline', 'QlessJob:acquire_throttles - inside if') - for _, tid in ipairs(throttles) do - redis.call('set', 'printline', 'QlessJob:acquire_throttles - invoking QlessThrottle:acquire') + for _, tid in ipairs(self:throttles()) do + redis.call('set', 'printline', 'QlessJob:acquire_throttles - acquiring ' .. tid) Qless.throttle(tid):acquire(self.jid) end - redis.call('set', 'printline', 'QlessJob:acquire_throttles - successfully completed') + + redis.call('set', 'printline', 'QlessJob:acquire_throttles - throttles avaible') return true end + +function QlessJob:throttles() + if not self._throttles then + self._throttles = cjson.decode(redis.call('hget', QlessJob.ns .. self.jid, 'throttles')) + end + + return self._throttles +end function Qless.queue(name) assert(name, 'Queue(): no queue name provided') local queue = {} @@ -1257,6 +1267,7 @@ function QlessQueue:pop(now, worker, count) return popped end + redis.call('set', 'printline', 'dead_jids : ' .. tostring(#dead_jids)) self:check_recurring(now, count - #dead_jids) @@ -1265,10 +1276,10 @@ function QlessQueue:pop(now, worker, count) self:check_throttled(now, count - #dead_jids) local jids = self.work.peek(count - #dead_jids) or {} - redis.call('set', 'printline', 'Pop - before acquire') + for index, jid in ipairs(jids) do local job = Qless.job(jid) - if job:acquire_throttles(now) then + if job:throttles_acquire(now) then self:pop_job(now, worker, job) table.insert(popped, jid) else @@ -1687,12 +1698,22 @@ function QlessQueue:check_scheduled(now, count) end function QlessQueue:check_throttled(now, count) + if count == 0 then + redis.call('set', 'printline', 'count 0 not popping any throttled jobs') + return + end + local throttled = self.throttled.peek(now, 0, count - 1) + redis.call('set', 'printline', 'throttling the following jobs ' .. cjson.encode(throttled)) for _, jid in ipairs(throttled) do - local priority = tonumber(redis.call('hget', QlessJob.ns .. jid, 'priority') or 0) - self.work.add(now, priority, jid) self.throttled.remove(jid) - redis.call('hset', QlessJob.ns .. jid, 'state', 'waiting') + if Qless.job(jid):throttles_available() then + local priority = tonumber(redis.call('hget', QlessJob.ns .. jid, 'priority') or 0) + self.work.add(now, priority, jid) + self.throttled.remove(jid) + else + self.throttled.add(now, jid) + end end end @@ -1753,7 +1774,7 @@ function QlessQueue:invalidate_locks(now, count) local queue = job_data['queue'] local group = 'failed-retries-' .. 
queue - job:release_throttles(now) + job:throttles_release(now) job:history(now, 'failed', {group = group}) redis.call('hmset', QlessJob.ns .. jid, 'state', 'failed', @@ -2009,7 +2030,7 @@ function QlessThrottle:release(now, jid) end function QlessThrottle:available() - redis.call('set', 'printline', 'available ' .. self.maximum .. ' == 0 or ' .. self.locks.length() .. ' < ' .. self.maximum) + redis.call('set', 'printline', self.id .. ' available ' .. self.maximum .. ' == 0 or ' .. self.locks.length() .. ' < ' .. self.maximum) return self.maximum == 0 or self.locks.length() < self.maximum end local QlessAPI = {} From 56481498065afcccc196fa403df07fa81c920fc7 Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Thu, 13 Mar 2014 08:12:44 -0400 Subject: [PATCH 13/83] updated qless scripts with no printlines --- lib/qless/lua/qless-lib.lua | 34 ++++++++++------------------------ lib/qless/lua/qless.lua | 37 +++++++++++-------------------------- 2 files changed, 21 insertions(+), 50 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua index bd500520..a75464e4 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: 730c668fa06e6a7bee288528e2f6da72dbaf349c +-- Current SHA: 4728f1d2c8986415f57d35c635ce571bf2a8311d -- This is a generated file ------------------------------------------------------------------------------- -- Forward declarations to make everything happy @@ -1330,20 +1330,15 @@ function QlessJob:throttles_release(now) throttles = cjson.decode(throttles or '{}') for _, tid in ipairs(throttles) do - redis.call('set', 'printline', 'releasing throttle : ' .. tid) Qless.throttle(tid):release(now, self.jid) end end function QlessJob:throttles_available() - redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. ' - checking throttle availability') for _, tid in ipairs(self:throttles()) do - redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. ' - checking availability for ' .. tid) if not Qless.throttle(tid):available() then - redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. ' - throttle not available ' .. tid) return false end - redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. ' - throttle available ' .. tid) end return true @@ -1351,16 +1346,13 @@ end function QlessJob:throttles_acquire(now) if not self:throttles_available() then - redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - throttles not avaible') return false end for _, tid in ipairs(self:throttles()) do - redis.call('set', 'printline', 'QlessJob:acquire_throttles - acquiring ' .. tid) Qless.throttle(tid):acquire(self.jid) end - redis.call('set', 'printline', 'QlessJob:acquire_throttles - throttles avaible') return true end @@ -1692,7 +1684,6 @@ function QlessQueue:pop(now, worker, count) return popped end - redis.call('set', 'printline', 'dead_jids : ' .. tostring(#dead_jids)) -- Now we've checked __all__ the locks for this queue the could -- have expired, and are no more than the number requested. @@ -1721,7 +1712,6 @@ function QlessQueue:pop(now, worker, count) self:pop_job(now, worker, job) table.insert(popped, jid) else - redis.call('set', 'printline', 'QlessQueue:pop - throttling ' .. 
job.jid) self:throttle(now, job) end end @@ -1736,11 +1726,8 @@ end -- Throttle a job function QlessQueue:throttle(now, job) self.throttled.add(now, job.jid) - redis.call('set', 'printline', 'QlessQueue:throttle - get state') local state = unpack(job:data('state')) - redis.call('set', 'printline', 'QlessQueue:throttle - check state') if state ~= 'throttled' then - redis.call('set', 'printline', 'QlessQueue:throttle - update job') job:update({state = 'throttled'}) job:history(now, 'throttled', {queue = self.name}) end @@ -1872,7 +1859,6 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) local throttles = assert(cjson.decode(options['throttles'] or '[]'), 'Put(): Arg "throttles" not JSON array: ' .. tostring(options['throttles'])) - redis.call('set', 'printline', 'throttles : ' .. tostring(options['throttles'])) -- If the job has old dependencies, determine which dependencies are -- in the new dependencies but not in the old ones, and which are in the -- old ones but not in the new @@ -2005,9 +1991,14 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) self.scheduled.add(now + delay, jid) end else + -- to avoid false negatives when popping jobs check if the job should be + -- throttled immediately. + local job = Qless.job(jid) if redis.call('scard', QlessJob.ns .. jid .. '-dependencies') > 0 then self.depends.add(now, jid) redis.call('hset', QlessJob.ns .. jid, 'state', 'depends') + elseif not job:throttles_available() then + self:throttle(now, job) else self.work.add(now, priority, jid) end @@ -2165,7 +2156,6 @@ function QlessQueue:check_recurring(now, count) -- we need to keep putting jobs on the queue local r = redis.call('hmget', 'ql:r:' .. jid, 'klass', 'data', 'priority', 'tags', 'retries', 'interval', 'backlog', 'throttles') - redis.call('set', 'printline', cjson.encode(r)) local klass, data, priority, tags, retries, interval, backlog, throttles = unpack( redis.call('hmget', 'ql:r:' .. jid, 'klass', 'data', 'priority', 'tags', 'retries', 'interval', 'backlog', 'throttles')) @@ -2254,13 +2244,11 @@ end function QlessQueue:check_throttled(now, count) if count == 0 then - redis.call('set', 'printline', 'count 0 not popping any throttled jobs') return end -- minus 1 since its inclusive local throttled = self.throttled.peek(now, 0, count - 1) - redis.call('set', 'printline', 'throttling the following jobs ' .. cjson.encode(throttled)) for _, jid in ipairs(throttled) do self.throttled.remove(jid) if Qless.job(jid):throttles_available() then @@ -2674,8 +2662,11 @@ function QlessThrottle:data() end -- Set the data for a throttled resource -function QlessThrottle:set(data) +function QlessThrottle:set(data, expiration) redis.call('hmset', QlessThrottle.ns .. self.id, 'id', self.id, 'maximum', data.maximum) + if expiration > 0 then + redis.call('expire', QlessThrottle.ns .. self.id, expiration) + end end -- Delete a throttled resource @@ -2686,25 +2677,20 @@ end -- Acquire a throttled resource for a job. -- Returns true of the job acquired the resource, false otherwise function QlessThrottle:acquire(jid) - redis.call('set', 'printline', 'QlessThrottle:acquire - checking availability') if not self:available() then - redis.call('set', 'printline', jid .. ' failed to acquire lock on ' .. self.id) return false end - redis.call('set', 'printline', jid .. ' acquired a lock on ' .. self.id) self.locks.add(1, jid) return true end -- Release a throttled resource. function QlessThrottle:release(now, jid) - redis.call('set', 'printline', jid .. 
' is releasing lock on ' .. self.id) self.locks.remove(jid) end -- Returns true if the throttle has locks available, false otherwise. function QlessThrottle:available() - redis.call('set', 'printline', self.id .. ' available ' .. self.maximum .. ' == 0 or ' .. self.locks.length() .. ' < ' .. self.maximum) return self.maximum == 0 or self.locks.length() < self.maximum end diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua index 3f849e12..368e5563 100644 --- a/lib/qless/lua/qless.lua +++ b/lib/qless/lua/qless.lua @@ -1,4 +1,4 @@ --- Current SHA: 730c668fa06e6a7bee288528e2f6da72dbaf349c +-- Current SHA: 4728f1d2c8986415f57d35c635ce571bf2a8311d -- This is a generated file local Qless = { ns = 'ql:' @@ -985,20 +985,15 @@ function QlessJob:throttles_release(now) throttles = cjson.decode(throttles or '{}') for _, tid in ipairs(throttles) do - redis.call('set', 'printline', 'releasing throttle : ' .. tid) Qless.throttle(tid):release(now, self.jid) end end function QlessJob:throttles_available() - redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. ' - checking throttle availability') for _, tid in ipairs(self:throttles()) do - redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. ' - checking availability for ' .. tid) if not Qless.throttle(tid):available() then - redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. ' - throttle not available ' .. tid) return false end - redis.call('set', 'printline', 'QlessJob:throttles_available - ' .. self.jid .. ' - throttle available ' .. tid) end return true @@ -1006,16 +1001,13 @@ end function QlessJob:throttles_acquire(now) if not self:throttles_available() then - redis.call('set', 'printline', 'QlessJob:acquire_throttles - ' .. self.jid .. ' - throttles not avaible') return false end for _, tid in ipairs(self:throttles()) do - redis.call('set', 'printline', 'QlessJob:acquire_throttles - acquiring ' .. tid) Qless.throttle(tid):acquire(self.jid) end - redis.call('set', 'printline', 'QlessJob:acquire_throttles - throttles avaible') return true end @@ -1267,7 +1259,6 @@ function QlessQueue:pop(now, worker, count) return popped end - redis.call('set', 'printline', 'dead_jids : ' .. tostring(#dead_jids)) self:check_recurring(now, count - #dead_jids) @@ -1283,7 +1274,6 @@ function QlessQueue:pop(now, worker, count) self:pop_job(now, worker, job) table.insert(popped, jid) else - redis.call('set', 'printline', 'QlessQueue:pop - throttling ' .. job.jid) self:throttle(now, job) end end @@ -1295,11 +1285,8 @@ end function QlessQueue:throttle(now, job) self.throttled.add(now, job.jid) - redis.call('set', 'printline', 'QlessQueue:throttle - get state') local state = unpack(job:data('state')) - redis.call('set', 'printline', 'QlessQueue:throttle - check state') if state ~= 'throttled' then - redis.call('set', 'printline', 'QlessQueue:throttle - update job') job:update({state = 'throttled'}) job:history(now, 'throttled', {queue = self.name}) end @@ -1404,7 +1391,6 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) local throttles = assert(cjson.decode(options['throttles'] or '[]'), 'Put(): Arg "throttles" not JSON array: ' .. tostring(options['throttles'])) - redis.call('set', 'printline', 'throttles : ' .. tostring(options['throttles'])) if #depends > 0 then local new = {} for _, d in ipairs(depends) do new[d] = 1 end @@ -1505,9 +1491,12 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) 
self.scheduled.add(now + delay, jid) end else + local job = Qless.job(jid) if redis.call('scard', QlessJob.ns .. jid .. '-dependencies') > 0 then self.depends.add(now, jid) redis.call('hset', QlessJob.ns .. jid, 'state', 'depends') + elseif not job:throttles_available() then + self:throttle(now, job) else self.work.add(now, priority, jid) end @@ -1633,7 +1622,6 @@ function QlessQueue:check_recurring(now, count) for index, jid in ipairs(r) do local r = redis.call('hmget', 'ql:r:' .. jid, 'klass', 'data', 'priority', 'tags', 'retries', 'interval', 'backlog', 'throttles') - redis.call('set', 'printline', cjson.encode(r)) local klass, data, priority, tags, retries, interval, backlog, throttles = unpack( redis.call('hmget', 'ql:r:' .. jid, 'klass', 'data', 'priority', 'tags', 'retries', 'interval', 'backlog', 'throttles')) @@ -1699,12 +1687,10 @@ end function QlessQueue:check_throttled(now, count) if count == 0 then - redis.call('set', 'printline', 'count 0 not popping any throttled jobs') return end local throttled = self.throttled.peek(now, 0, count - 1) - redis.call('set', 'printline', 'throttling the following jobs ' .. cjson.encode(throttled)) for _, jid in ipairs(throttled) do self.throttled.remove(jid) if Qless.job(jid):throttles_available() then @@ -2004,8 +1990,11 @@ function QlessThrottle:data() return data end -function QlessThrottle:set(data) +function QlessThrottle:set(data, expiration) redis.call('hmset', QlessThrottle.ns .. self.id, 'id', self.id, 'maximum', data.maximum) + if expiration > 0 then + redis.call('expire', QlessThrottle.ns .. self.id, expiration) + end end function QlessThrottle:unset() @@ -2013,24 +2002,19 @@ function QlessThrottle:unset() end function QlessThrottle:acquire(jid) - redis.call('set', 'printline', 'QlessThrottle:acquire - checking availability') if not self:available() then - redis.call('set', 'printline', jid .. ' failed to acquire lock on ' .. self.id) return false end - redis.call('set', 'printline', jid .. ' acquired a lock on ' .. self.id) self.locks.add(1, jid) return true end function QlessThrottle:release(now, jid) - redis.call('set', 'printline', jid .. ' is releasing lock on ' .. self.id) self.locks.remove(jid) end function QlessThrottle:available() - redis.call('set', 'printline', self.id .. ' available ' .. self.maximum .. ' == 0 or ' .. self.locks.length() .. ' < ' .. self.maximum) return self.maximum == 0 or self.locks.length() < self.maximum end local QlessAPI = {} @@ -2228,11 +2212,12 @@ QlessAPI['queue.throttle.set'] = function(now, queue, max) Qless.throttle(QlessQueue.ns .. queue):set({maximum = max}) end -QlessAPI['throttle.set'] = function(now, tid, max) +QlessAPI['throttle.set'] = function(now, tid, max, ...) 
+ local expiration = unpack(arg) local data = { maximum = max } - Qless.throttle(tid):set(data) + Qless.throttle(tid):set(data, tonumber(expiration or 0)) end QlessAPI['throttle.get'] = function(now, tid) From db57a0c1a7bca32e64428268390acab59d37cea7 Mon Sep 17 00:00:00 2001 From: Danny Guinther Date: Thu, 13 Mar 2014 08:33:38 -0400 Subject: [PATCH 14/83] Add API to set throttle expiration and retrieve throttle TTL --- lib/qless/lua/qless-lib.lua | 7 ++++++- lib/qless/lua/qless.lua | 12 ++++++++++-- lib/qless/throttle.rb | 14 +++++++++++++- spec/unit/throttle_spec.rb | 7 +++++++ 4 files changed, 36 insertions(+), 4 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua index a75464e4..63fb7ebc 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: 4728f1d2c8986415f57d35c635ce571bf2a8311d +-- Current SHA: 85350ea68c9836b810226dc737c08ed65e2c741b -- This is a generated file ------------------------------------------------------------------------------- -- Forward declarations to make everything happy @@ -2694,3 +2694,8 @@ end function QlessThrottle:available() return self.maximum == 0 or self.locks.length() < self.maximum end + +-- Returns the TTL of the throttle +function QlessThrottle:ttl() + return redis.call('ttl', QlessThrottle.ns .. self.id) +end diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua index 368e5563..4a1ea38f 100644 --- a/lib/qless/lua/qless.lua +++ b/lib/qless/lua/qless.lua @@ -1,4 +1,4 @@ --- Current SHA: 4728f1d2c8986415f57d35c635ce571bf2a8311d +-- Current SHA: 85350ea68c9836b810226dc737c08ed65e2c741b -- This is a generated file local Qless = { ns = 'ql:' @@ -2017,6 +2017,10 @@ end function QlessThrottle:available() return self.maximum == 0 or self.locks.length() < self.maximum end + +function QlessThrottle:ttl() + return redis.call('ttl', QlessThrottle.ns .. self.id) +end local QlessAPI = {} function QlessAPI.get(now, jid) @@ -2209,7 +2213,7 @@ QlessAPI['queue.throttle.get'] = function(now, queue) end QlessAPI['queue.throttle.set'] = function(now, queue, max) - Qless.throttle(QlessQueue.ns .. queue):set({maximum = max}) + Qless.throttle(QlessQueue.ns .. queue):set({maximum = max}, 0) end QlessAPI['throttle.set'] = function(now, tid, max, ...) @@ -2232,6 +2236,10 @@ QlessAPI['throttle.locks'] = function(now, tid) return Qless.throttle(tid).locks.members() end +QlessAPI['throttle.ttl'] = function(now, tid) + return Qless.throttle(tid):ttl() +end + if #KEYS > 0 then error('No Keys should be provided') end local command_name = assert(table.remove(ARGV, 1), 'Must provide a command') diff --git a/lib/qless/throttle.rb b/lib/qless/throttle.rb index acde6e7a..36aa67d8 100644 --- a/lib/qless/throttle.rb +++ b/lib/qless/throttle.rb @@ -16,6 +16,10 @@ def delete @client.call('throttle.delete', @name) end + def expiration=(expire_time_in_seconds) + update(nil, Integer(expire_time_in_seconds)) + end + def id @name end @@ -29,18 +33,26 @@ def maximum end def maximum=(max) - @client.call('throttle.set', @name, max) + update(max) end def pending @client.call('throttle.pending', @name) end + def ttl + @client.call('throttle.ttl', @name) + end + private def throttle_attrs throttle_json = @client.call('throttle.get', @name) throttle_json ? 
JSON.parse(throttle_json) : {} end + def update(max, expiration = 0) + @client.call('throttle.set', @name, max || maximum, expiration) + end + end end diff --git a/spec/unit/throttle_spec.rb b/spec/unit/throttle_spec.rb index 4dc26d5c..638df3d5 100644 --- a/spec/unit/throttle_spec.rb +++ b/spec/unit/throttle_spec.rb @@ -37,6 +37,13 @@ module Qless t.maximum.should eq(5) end + it "can set the throttle's expiration and retrieve it's ttl" do + t = Throttle.new('name', client) + t.ttl.should be < 0 + t.expiration = 5 + t.ttl.should be > 0 + end + it "handles throttle names as a String or Symbol" do t = Throttle.new('name', client) t.maximum = 5 From d16781752824ecba07319e0995bfdcba46e63362 Mon Sep 17 00:00:00 2001 From: Danny Guinther Date: Thu, 13 Mar 2014 14:11:30 -0400 Subject: [PATCH 15/83] Update lua scripts --- lib/qless/lua/qless-lib.lua | 4 ++-- lib/qless/lua/qless.lua | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua index 63fb7ebc..6a298bda 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: 85350ea68c9836b810226dc737c08ed65e2c741b +-- Current SHA: 9a372256e2b9cfb07f9a4d4becc36e2b6d262ba0 -- This is a generated file ------------------------------------------------------------------------------- -- Forward declarations to make everything happy @@ -1359,7 +1359,7 @@ end function QlessJob:throttles() -- memoize throttles for the job. if not self._throttles then - self._throttles = cjson.decode(redis.call('hget', QlessJob.ns .. self.jid, 'throttles')) + self._throttles = cjson.decode(redis.call('hget', QlessJob.ns .. self.jid, 'throttles') or '[]') end return self._throttles diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua index 4a1ea38f..92359ca2 100644 --- a/lib/qless/lua/qless.lua +++ b/lib/qless/lua/qless.lua @@ -1,4 +1,4 @@ --- Current SHA: 85350ea68c9836b810226dc737c08ed65e2c741b +-- Current SHA: 9a372256e2b9cfb07f9a4d4becc36e2b6d262ba0 -- This is a generated file local Qless = { ns = 'ql:' @@ -1013,7 +1013,7 @@ end function QlessJob:throttles() if not self._throttles then - self._throttles = cjson.decode(redis.call('hget', QlessJob.ns .. self.jid, 'throttles')) + self._throttles = cjson.decode(redis.call('hget', QlessJob.ns .. 
self.jid, 'throttles') or '[]') end return self._throttles From 37ea6cc185c6b130577ecdb4786fc32b7a24ed67 Mon Sep 17 00:00:00 2001 From: Matt Conway Date: Fri, 14 Mar 2014 08:53:46 -0400 Subject: [PATCH 16/83] expose throttles throughout UI --- lib/qless/queue.rb | 4 ++ lib/qless/server.rb | 2 +- lib/qless/server/views/queue.erb | 6 +- lib/qless/server/views/queues.erb | 4 +- lib/qless/server/views/track.erb | 6 ++ spec/integration/server_spec.rb | 98 +++++++++++++++++++++++++++++-- 6 files changed, 111 insertions(+), 9 deletions(-) diff --git a/lib/qless/queue.rb b/lib/qless/queue.rb index ebc730b3..b0998b68 100644 --- a/lib/qless/queue.rb +++ b/lib/qless/queue.rb @@ -17,6 +17,10 @@ def running(start = 0, count = 25) @client.call('jobs', 'running', @name, start, count) end + def throttled(start = 0, count = 25) + @client.call('jobs', 'throttled', @name, start, count) + end + def stalled(start = 0, count = 25) @client.call('jobs', 'stalled', @name, start, count) end diff --git a/lib/qless/server.rb b/lib/qless/server.rb index 7894e3cc..38af8ba9 100755 --- a/lib/qless/server.rb +++ b/lib/qless/server.rb @@ -173,7 +173,7 @@ def strftime(t) json(client.queues[params[:name]].counts) end - filtered_tabs = %w[ running scheduled stalled depends recurring ].to_set + filtered_tabs = %w[ running throttled scheduled stalled depends recurring ].to_set get '/queues/:name/?:tab?' do queue = client.queues[params[:name]] tab = params.fetch('tab', 'stats') diff --git a/lib/qless/server/views/queue.erb b/lib/qless/server/views/queue.erb index 14e5b2f3..9f2e5f9b 100644 --- a/lib/qless/server/views/queue.erb +++ b/lib/qless/server/views/queue.erb @@ -43,6 +43,7 @@
  • ">Stats
  • ">Running
  • ">Waiting
  • +">Throttled
  • ">Scheduled
  • ">Stalled
  • ">Depends
  • @@ -57,9 +58,10 @@

    "><%= queue['name'] %> | <%= queue['running'] %> / <%= queue['waiting'] %> / + <%= queue['throttled'] %> / <%= queue['scheduled'] %> / <%= queue['stalled'] %> / - <%= queue['depends'] %> (running / waiting / scheduled / stalled / depends) + <%= queue['depends'] %> (running / waiting / throttled / scheduled / stalled / depends)

    @@ -74,7 +76,7 @@ -<% if ['running', 'waiting', 'scheduled', 'stalled', 'depends', 'recurring'].include?(tab) %> +<% if ['running', 'waiting', 'throttled', 'scheduled', 'stalled', 'depends', 'recurring'].include?(tab) %>
    <%= erb :_job_list, :locals => { :jobs => jobs, :queues => queues } %> <% else %> diff --git a/lib/qless/server/views/queues.erb b/lib/qless/server/views/queues.erb index 8509d849..fd3bb33a 100644 --- a/lib/qless/server/views/queues.erb +++ b/lib/qless/server/views/queues.erb @@ -33,9 +33,11 @@

    | <%= queue['running'] %> / <%= queue['waiting'] %> / + <%= queue['throttled'] %> / <%= queue['scheduled'] %> / <%= queue['stalled'] %> / - <%= queue['depends'] %> (running / waiting / scheduled / stalled / depends) + <%= queue['depends'] %> / + <%= queue['recurring'] %> (running / waiting / throttled / scheduled / stalled / depends / recurring)

    diff --git a/lib/qless/server/views/track.erb b/lib/qless/server/views/track.erb index 7a9d4439..617f341c 100644 --- a/lib/qless/server/views/track.erb +++ b/lib/qless/server/views/track.erb @@ -11,6 +11,7 @@ var fade = function(jid, type) {
  • All (<%= tracked['jobs'].length %>)
  • Running (<%= tracked['jobs'].select { |job| job.state == 'running' }.length %>)
  • Waiting (<%= tracked['jobs'].select { |job| job.state == 'waiting' }.length %>)
  • +Throttled (<%= tracked['jobs'].select { |job| job.state == 'throttled'}.length %>)
  • Scheduled (<%= tracked['jobs'].select { |job| job.state == 'scheduled' }.length %>)
  • Stalled (<%= tracked['jobs'].select { |job| job.state == 'stalled' }.length %>)
  • Completed (<%= tracked['jobs'].select { |job| job.state == 'complete' }.length %>)
  • @@ -41,6 +42,11 @@ var fade = function(jid, type) { <%= erb :_job, :layout => false, :locals => { :job => job, :queues => queues } %> <% end %> +
    + <% tracked['jobs'].select { |job| job.state == 'throttled' }.each do |job| %> + <%= erb :_job, :layout => false, :locals => { :job => job, :queues => queues } %> + <% end %> +
    <% tracked['jobs'].select { |job| job.state == 'scheduled' }.each do |job| %> <%= erb :_job, :layout => false, :locals => { :job => job, :queues => queues } %> diff --git a/spec/integration/server_spec.rb b/spec/integration/server_spec.rb index 7caed7a5..9eeb849f 100644 --- a/spec/integration/server_spec.rb +++ b/spec/integration/server_spec.rb @@ -134,23 +134,34 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) q.put(Qless::Job, {}) visit '/' first('.queue-row', text: /testing/).should be - first('.queue-row', text: /0\D+1\D+0\D+0\D+0/).should be + first('.queue-row', text: /0\D+1\D+0\D+0\D+0\D+0\D+0/).should be first('h1', text: /no queues/i).should be_nil first('h1', text: /queues and their job counts/i).should be # Let's pop the job, and make sure that we can see /that/ job = q.pop visit '/' - first('.queue-row', text: /1\D+0\D+0\D+0\D+0/).should be + first('.queue-row', text: /1\D+0\D+0\D+0\D+0\D+0\D+0/).should be first('.worker-row', text: q.worker_name).should be first('.worker-row', text: /1\D+0/i).should be # Let's complete the job, and make sure it disappears job.complete visit '/' - first('.queue-row', text: /0\D+0\D+0\D+0\D+0/).should be + first('.queue-row', text: /0\D+0\D+0\D+0\D+0\D+0\D+0/).should be first('.worker-row', text: /0\D+0/i).should be + # Let's throttle a job, and make sure we see it + client.throttles['one'].maximum = 1 + q.put(Qless::Job, {}, :throttles => ["one"]) + q.put(Qless::Job, {}, :throttles => ["one"]) + job1 = q.pop + job2 = q.pop + visit '/' + first('.queue-row', text: /1\D+0\D+1\D+0\D+0\D+0\D+0/).should be + job1.complete + q.pop.complete + # Let's put and pop and fail a job, and make sure we see it q.put(Qless::Job, {}) job = q.pop @@ -162,11 +173,11 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) # And let's have one scheduled, and make sure it shows up accordingly jid = q.put(Qless::Job, {}, delay: 60) visit '/' - first('.queue-row', text: /0\D+0\D+1\D+0\D+0/).should be + first('.queue-row', text: /0\D+0\D+0\D+1\D+0\D+0\D+0/).should be # And one that depends on that job q.put(Qless::Job, {}, depends: [jid]) visit '/' - first('.queue-row', text: /0\D+0\D+1\D+0\D+1/).should be + first('.queue-row', text: /0\D+0\D+0\D+1\D+0\D+1\D+0/).should be end it 'can visit the tracked page' do @@ -193,6 +204,17 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) first('a', text: /completed\W+1/i).should be job.untrack + # And now for a throttled job + client.throttles['one'].maximum = 1 + q.put(Qless::Job, {}, throttles: ["one"]) + job = client.jobs[q.put(Qless::Job, {}, throttles: ["one"])] + job.track + q.pop(2) + visit '/track' + first('a', text: /all\W+1/i).should be + first('a', text: /throttled\W+1/i).should be + job.untrack + # And now for a scheduled job job = client.jobs[q.put(Qless::Job, {}, delay: 600)] job.track @@ -634,6 +656,52 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) groups.map { |g| g.text }.join(' ').should eq('e j i h g f d c b a') end + it 'can visit /queues' do + # We should be able to see all of the appropriate tabs, + # We should be able to see all of the jobs + jid = q.put(Qless::Job, {}) + + # We should see this job + visit '/queues' + first('h3', text: /0\D+1\D+0\D+0\D+0\D+0\D+0/).should be + + # Now let's pop off the job so that it's running + job = q.pop + visit '/queues' + first('h3', text: /1\D+0\D+0\D+0\D+0\D+0\D+0/).should be + job.complete + + # And now for a throttled job + client.throttles['one'].maximum = 1 + job1 = client.jobs[q.put(Qless::Job, {}, throttles: ["one"])] + job2 = 
client.jobs[q.put(Qless::Job, {}, throttles: ["one"])] + q.pop(2) + visit '/queues' + first('h3', text: /1\D+0\D+1\D+0\D+0\D+0\D+0/).should be + job1.cancel + job2.cancel + + # And now for a scheduled job + job = client.jobs[q.put(Qless::Job, {}, delay: 600)] + visit '/queues' + first('h3', text: /0\D+0\D+0\D+1\D+0\D+0\D+0/).should be + job.cancel + + # And now a dependent job + job1 = client.jobs[q.put(Qless::Job, {})] + job2 = client.jobs[q.put(Qless::Job, {}, depends: [job1.jid])] + visit '/queues' + first('h3', text: /0\D+1\D+0\D+0\D+0\D+1\D+0/).should be + job2.cancel + job1.cancel + + # And now a recurring job + job = client.jobs[q.recur(Qless::Job, {}, 5)] + visit '/queues' + first('h3', text: /0\D+0\D+0\D+0\D+0\D+0\D+1/).should be + job.cancel + end + it 'can visit the various /queues/* endpoints' do # We should be able to see all of the appropriate tabs, # We should be able to see all of the jobs @@ -648,6 +716,16 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) first('h2', text: /#{jid[0...8]}/).should be job.complete + # And now for a throttled job + client.throttles['one'].maximum = 1 + job1 = client.jobs[q.put(Qless::Job, {}, throttles: ["one"])] + job2 = client.jobs[q.put(Qless::Job, {}, throttles: ["one"])] + q.pop(2) + visit '/queues/testing/throttled' + first('h2', text: /#{job2.jid[0...8]}/).should be + job1.cancel + job2.cancel + # And now for a scheduled job job = client.jobs[q.put(Qless::Job, {}, delay: 600)] visit '/queues/testing/scheduled' @@ -690,6 +768,16 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) job.untrack first('.tracked-row', text: /complete/i).should be + # And now for a throttled job + client.throttles['one'].maximum = 1 + job1 = client.jobs[q.put(Qless::Job, {}, throttles: ["one"])] + job2 = client.jobs[q.put(Qless::Job, {}, throttles: ["one"])] + job2.track + q.pop(2) + visit '/' + first('.tracked-row', text: /throttled/i).should be + job2.untrack + # And now for a scheduled job job = client.jobs[q.put(Qless::Job, {}, delay: 600)] job.track From 00fcec6b851911c5290c36435fe90ad77806afed Mon Sep 17 00:00:00 2001 From: Matt Conway Date: Fri, 14 Mar 2014 10:43:40 -0400 Subject: [PATCH 17/83] fix test till cancel is fixed in qless --- spec/integration/server_spec.rb | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/spec/integration/server_spec.rb b/spec/integration/server_spec.rb index 9eeb849f..2d7ee8dd 100644 --- a/spec/integration/server_spec.rb +++ b/spec/integration/server_spec.rb @@ -673,13 +673,13 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) # And now for a throttled job client.throttles['one'].maximum = 1 - job1 = client.jobs[q.put(Qless::Job, {}, throttles: ["one"])] - job2 = client.jobs[q.put(Qless::Job, {}, throttles: ["one"])] - q.pop(2) + q.put(Qless::Job, {}, throttles: ["one"]) + q.put(Qless::Job, {}, throttles: ["one"]) + job1, job2 = q.pop(2) visit '/queues' first('h3', text: /1\D+0\D+1\D+0\D+0\D+0\D+0/).should be - job1.cancel - job2.cancel + job1.complete + q.pop.complete # And now for a scheduled job job = client.jobs[q.put(Qless::Job, {}, delay: 600)] From b22a90d27b44221435dd265be89c5ac9ca0cfc3d Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Wed, 2 Apr 2014 08:03:51 -0400 Subject: [PATCH 18/83] updated qless core lua scripts --- lib/qless/lua/qless-lib.lua | 136 +++++++++++++++++++++++------------- lib/qless/lua/qless.lua | 121 +++++++++++++++++++++----------- 2 files changed, 168 insertions(+), 89 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua 
b/lib/qless/lua/qless-lib.lua index 6a298bda..f2404a9b 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: 9a372256e2b9cfb07f9a4d4becc36e2b6d262ba0 +-- Current SHA: a9c3b988a5e3150a5d01b698d882bdbf8b264c42 -- This is a generated file ------------------------------------------------------------------------------- -- Forward declarations to make everything happy @@ -98,6 +98,27 @@ function Qless.throttle(tid) end end, pop = function(min, max) return redis.call('zremrangebyrank', QlessThrottle.ns .. tid .. '-locks', min, max) + end, peek = function(min, max) + return redis.call('zrange', QlessThrottle.ns .. tid .. '-locks', min, max) + end + } + + -- set of jids which are waiting for the throttle to become available. + throttle.pending = { + length = function() + return (redis.call('zcard', QlessThrottle.ns .. tid .. '-pending') or 0) + end, members = function() + return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', 0, -1) + end, add = function(now, jid) + redis.call('zadd', QlessThrottle.ns .. tid .. '-pending', now, jid) + end, remove = function(...) + if #arg > 0 then + return redis.call('zrem', QlessThrottle.ns .. tid .. '-pending', unpack(arg)) + end + end, pop = function(min, max) + return redis.call('zremrangebyrank', QlessThrottle.ns .. tid .. '-pending', min, max) + end, peek = function(min, max) + return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', min, max) end } @@ -388,7 +409,6 @@ function Qless.cancel(now, ...) -- If we've made it this far, then we are good to go. We can now just -- remove any trace of all these jobs, as they form a dependent clique for _, jid in ipairs(arg) do - local namespaced_jid = QlessJob.ns .. jid -- Find any stage it's associated with and remove its from that stage local state, queue, failure, worker = unpack(redis.call( 'hmget', QlessJob.ns .. jid, 'state', 'queue', 'failure', 'worker')) @@ -413,13 +433,10 @@ function Qless.cancel(now, ...) -- Remove it from that queue if queue then local queue = Qless.queue(queue) - queue.work.remove(jid) - queue.locks.remove(jid) - queue.scheduled.remove(jid) - queue.depends.remove(jid) + queue:remove_job(jid) end - Qless.job(namespaced_jid):throttles_release(now) + Qless.job(jid):throttles_release(now) -- We should probably go through all our dependencies and remove -- ourselves from the list of dependents @@ -654,9 +671,7 @@ function QlessJob:complete(now, worker, queue, data, ...) -- Remove the job from the previous queue local queue_obj = Qless.queue(queue) - queue_obj.work.remove(self.jid) - queue_obj.locks.remove(self.jid) - queue_obj.scheduled.remove(self.jid) + queue_obj:remove_job(self.jid) self:throttles_release(now) @@ -668,7 +683,7 @@ function QlessJob:complete(now, worker, queue, data, ...) local time = tonumber( redis.call('hget', QlessJob.ns .. self.jid, 'time') or now) local waiting = now - time - Qless.queue(queue):stat(now, 'run', waiting) + queue_obj:stat(now, 'run', waiting) redis.call('hset', QlessJob.ns .. 
self.jid, 'time', string.format("%.20f", now)) @@ -906,9 +921,7 @@ function QlessJob:fail(now, worker, group, message, data) -- Now remove the instance from the schedule, and work queues for the -- queue it's in local queue_obj = Qless.queue(queue) - queue_obj.work.remove(self.jid) - queue_obj.locks.remove(self.jid) - queue_obj.scheduled.remove(self.jid) + queue_obj:remove_job(self.jid) -- The reason that this appears here is that the above will fail if the -- job doesn't exist @@ -1327,7 +1340,7 @@ end function QlessJob:throttles_release(now) local throttles = redis.call('hget', QlessJob.ns .. self.jid, 'throttles') - throttles = cjson.decode(throttles or '{}') + throttles = cjson.decode(throttles or '[]') for _, tid in ipairs(throttles) do Qless.throttle(tid):release(now, self.jid) @@ -1356,6 +1369,17 @@ function QlessJob:throttles_acquire(now) return true end +-- Finds the first unavailable throttle and adds the job to its pending job set. +function QlessJob:throttle(now) + for _, tid in ipairs(self:throttles()) do + local throttle = Qless.throttle(tid) + if not throttle:available() then + throttle:pend(now, self.jid) + return + end + end +end + function QlessJob:throttles() -- memoize throttles for the job. if not self._throttles then @@ -1697,11 +1721,6 @@ function QlessQueue:pop(now, worker, count) -- unit of work. self:check_scheduled(now, count - #dead_jids) - -- If we still need values in order to meet the demand, check our throttled - -- jobs. This has the side benefit of naturally updating other throttles - -- on the jobs checked. - self:check_throttled(now, count - #dead_jids) - -- With these in place, we can expand this list of jids based on the work -- queue itself and the priorities therein local jids = self.work.peek(count - #dead_jids) or {} @@ -1725,6 +1744,7 @@ end -- Throttle a job function QlessQueue:throttle(now, job) + job:throttle(now) self.throttled.add(now, job.jid) local state = unpack(job:data('state')) if state ~= 'throttled' then @@ -1746,8 +1766,7 @@ function QlessQueue:pop_job(now, worker, job) -- Update the wait time statistics -- Just does job:data('time') do the same as this? - local time = tonumber( - redis.call('hget', QlessJob.ns .. jid, 'time') or now) + local time = tonumber(redis.call('hget', QlessJob.ns .. jid, 'time') or now) local waiting = now - time self:stat(now, 'wait', waiting) redis.call('hset', QlessJob.ns .. jid, @@ -1892,10 +1911,7 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) 
-- If this item was previously in another queue, then we should remove it from there if oldqueue then local queue_obj = Qless.queue(oldqueue) - queue_obj.work.remove(jid) - queue_obj.locks.remove(jid) - queue_obj.depends.remove(jid) - queue_obj.scheduled.remove(jid) + queue_obj:remove_job(jid) end -- If this had previously been given out to a worker, make sure to remove it @@ -2143,6 +2159,14 @@ end ------------------------------------------------------------------------------- -- Housekeeping methods ------------------------------------------------------------------------------- +function QlessQueue:remove_job(jid) + self.work.remove(jid) + self.locks.remove(jid) + self.throttled.remove(jid) + self.depends.remove(jid) + self.scheduled.remove(jid) +end + -- Instantiate any recurring jobs that are ready function QlessQueue:check_recurring(now, count) -- This is how many jobs we've moved so far @@ -2242,27 +2266,6 @@ function QlessQueue:check_scheduled(now, count) end end -function QlessQueue:check_throttled(now, count) - if count == 0 then - return - end - - -- minus 1 since its inclusive - local throttled = self.throttled.peek(now, 0, count - 1) - for _, jid in ipairs(throttled) do - self.throttled.remove(jid) - if Qless.job(jid):throttles_available() then - local priority = tonumber(redis.call('hget', QlessJob.ns .. jid, 'priority') or 0) - self.work.add(now, priority, jid) - self.throttled.remove(jid) - else - -- shift jid to end of throttled jobs - -- use current time to make sure it gets added to the end of the sorted set. - self.throttled.add(now, jid) - end - end -end - -- Check for and invalidate any locks that have been lost. Returns the -- list of jids that have been invalidated function QlessQueue:invalidate_locks(now, count) @@ -2685,9 +2688,34 @@ function QlessThrottle:acquire(jid) return true end --- Release a throttled resource. +function QlessThrottle:pend(now, jid) + self.pending.add(now, jid) +end + +-- Releases the lock taken by the specified jid. +-- number of jobs released back into the queues is determined by the locks_available method. function QlessThrottle:release(now, jid) self.locks.remove(jid) + + local available_locks = self:locks_available() + if self.pending.length() == 0 or available_locks < 1 then + return + end + + -- subtract one to ensure we pop the correct amount. peek(0, 0) returns the first element + -- peek(0,1) return the first two. + for _, jid in ipairs(self.pending.peek(0, available_locks - 1)) do + local job = Qless.job(jid) + local data = job:data() + local queue = Qless.queue(data['queue']) + + queue.throttled.remove(jid) + queue.work.add(now, data.priority, jid) + end + + -- subtract one to ensure we pop the correct amount. pop(0, 0) pops the first element + -- pop(0,1) pops the first two. + local popped = self.pending.pop(0, available_locks - 1) end -- Returns true if the throttle has locks available, false otherwise. @@ -2699,3 +2727,15 @@ end function QlessThrottle:ttl() return redis.call('ttl', QlessThrottle.ns .. self.id) end + +-- Returns the number of locks available for the throttle. +-- calculated by maximum - locks.length(), if the throttle is unlimited +-- then up to 10 jobs are released. +function QlessThrottle:locks_available() + if self.maximum == 0 then + -- Arbitrarily chosen value. might want to make it configurable in the future. 
+ return 10 + end + + return self.maximum - self.locks.length() +end diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua index 92359ca2..1beb1531 100644 --- a/lib/qless/lua/qless.lua +++ b/lib/qless/lua/qless.lua @@ -1,4 +1,4 @@ --- Current SHA: 9a372256e2b9cfb07f9a4d4becc36e2b6d262ba0 +-- Current SHA: a9c3b988a5e3150a5d01b698d882bdbf8b264c42 -- This is a generated file local Qless = { ns = 'ql:' @@ -81,6 +81,26 @@ function Qless.throttle(tid) end end, pop = function(min, max) return redis.call('zremrangebyrank', QlessThrottle.ns .. tid .. '-locks', min, max) + end, peek = function(min, max) + return redis.call('zrange', QlessThrottle.ns .. tid .. '-locks', min, max) + end + } + + throttle.pending = { + length = function() + return (redis.call('zcard', QlessThrottle.ns .. tid .. '-pending') or 0) + end, members = function() + return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', 0, -1) + end, add = function(now, jid) + redis.call('zadd', QlessThrottle.ns .. tid .. '-pending', now, jid) + end, remove = function(...) + if #arg > 0 then + return redis.call('zrem', QlessThrottle.ns .. tid .. '-pending', unpack(arg)) + end + end, pop = function(min, max) + return redis.call('zremrangebyrank', QlessThrottle.ns .. tid .. '-pending', min, max) + end, peek = function(min, max) + return redis.call('zrange', QlessThrottle.ns .. tid .. '-pending', min, max) end } @@ -263,7 +283,6 @@ function Qless.cancel(now, ...) end for _, jid in ipairs(arg) do - local namespaced_jid = QlessJob.ns .. jid local state, queue, failure, worker = unpack(redis.call( 'hmget', QlessJob.ns .. jid, 'state', 'queue', 'failure', 'worker')) @@ -283,13 +302,10 @@ function Qless.cancel(now, ...) if queue then local queue = Qless.queue(queue) - queue.work.remove(jid) - queue.locks.remove(jid) - queue.scheduled.remove(jid) - queue.depends.remove(jid) + queue:remove_job(jid) end - Qless.job(namespaced_jid):throttles_release(now) + Qless.job(jid):throttles_release(now) for i, j in ipairs(redis.call( 'smembers', QlessJob.ns .. jid .. '-dependencies')) do @@ -467,16 +483,14 @@ function QlessJob:complete(now, worker, queue, data, ...) end local queue_obj = Qless.queue(queue) - queue_obj.work.remove(self.jid) - queue_obj.locks.remove(self.jid) - queue_obj.scheduled.remove(self.jid) + queue_obj:remove_job(self.jid) self:throttles_release(now) local time = tonumber( redis.call('hget', QlessJob.ns .. self.jid, 'time') or now) local waiting = now - time - Qless.queue(queue):stat(now, 'run', waiting) + queue_obj:stat(now, 'run', waiting) redis.call('hset', QlessJob.ns .. self.jid, 'time', string.format("%.20f", now)) @@ -656,9 +670,7 @@ function QlessJob:fail(now, worker, group, message, data) redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue, 'failed' , 1) local queue_obj = Qless.queue(queue) - queue_obj.work.remove(self.jid) - queue_obj.locks.remove(self.jid) - queue_obj.scheduled.remove(self.jid) + queue_obj:remove_job(self.jid) if data then redis.call('hset', QlessJob.ns .. self.jid, 'data', cjson.encode(data)) @@ -982,7 +994,7 @@ end function QlessJob:throttles_release(now) local throttles = redis.call('hget', QlessJob.ns .. 
self.jid, 'throttles') - throttles = cjson.decode(throttles or '{}') + throttles = cjson.decode(throttles or '[]') for _, tid in ipairs(throttles) do Qless.throttle(tid):release(now, self.jid) @@ -1011,6 +1023,16 @@ function QlessJob:throttles_acquire(now) return true end +function QlessJob:throttle(now) + for _, tid in ipairs(self:throttles()) do + local throttle = Qless.throttle(tid) + if not throttle:available() then + throttle:pend(now, self.jid) + return + end + end +end + function QlessJob:throttles() if not self._throttles then self._throttles = cjson.decode(redis.call('hget', QlessJob.ns .. self.jid, 'throttles') or '[]') @@ -1264,8 +1286,6 @@ function QlessQueue:pop(now, worker, count) self:check_scheduled(now, count - #dead_jids) - self:check_throttled(now, count - #dead_jids) - local jids = self.work.peek(count - #dead_jids) or {} for index, jid in ipairs(jids) do @@ -1284,6 +1304,7 @@ function QlessQueue:pop(now, worker, count) end function QlessQueue:throttle(now, job) + job:throttle(now) self.throttled.add(now, job.jid) local state = unpack(job:data('state')) if state ~= 'throttled' then @@ -1302,8 +1323,7 @@ function QlessQueue:pop_job(now, worker, job) Qless.config.get(self.name .. '-heartbeat') or Qless.config.get('heartbeat', 60)) - local time = tonumber( - redis.call('hget', QlessJob.ns .. jid, 'time') or now) + local time = tonumber(redis.call('hget', QlessJob.ns .. jid, 'time') or now) local waiting = now - time self:stat(now, 'wait', waiting) redis.call('hset', QlessJob.ns .. jid, @@ -1415,10 +1435,7 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...) if oldqueue then local queue_obj = Qless.queue(oldqueue) - queue_obj.work.remove(jid) - queue_obj.locks.remove(jid) - queue_obj.depends.remove(jid) - queue_obj.scheduled.remove(jid) + queue_obj:remove_job(jid) end if oldworker and oldworker ~= '' then @@ -1616,6 +1633,14 @@ function QlessQueue:length() return self.locks.length() + self.work.length() + self.scheduled.length() end +function QlessQueue:remove_job(jid) + self.work.remove(jid) + self.locks.remove(jid) + self.throttled.remove(jid) + self.depends.remove(jid) + self.scheduled.remove(jid) +end + function QlessQueue:check_recurring(now, count) local moved = 0 local r = self.recurring.peek(now, 0, count) @@ -1685,24 +1710,6 @@ function QlessQueue:check_scheduled(now, count) end end -function QlessQueue:check_throttled(now, count) - if count == 0 then - return - end - - local throttled = self.throttled.peek(now, 0, count - 1) - for _, jid in ipairs(throttled) do - self.throttled.remove(jid) - if Qless.job(jid):throttles_available() then - local priority = tonumber(redis.call('hget', QlessJob.ns .. 
jid, 'priority') or 0) - self.work.add(now, priority, jid) - self.throttled.remove(jid) - else - self.throttled.add(now, jid) - end - end -end - function QlessQueue:invalidate_locks(now, count) local jids = {} for index, jid in ipairs(self.locks.expired(now, 0, count)) do @@ -2010,8 +2017,28 @@ function QlessThrottle:acquire(jid) return true end +function QlessThrottle:pend(now, jid) + self.pending.add(now, jid) +end + function QlessThrottle:release(now, jid) self.locks.remove(jid) + + local available_locks = self:locks_available() + if self.pending.length() == 0 or available_locks < 1 then + return + end + + for _, jid in ipairs(self.pending.peek(0, available_locks - 1)) do + local job = Qless.job(jid) + local data = job:data() + local queue = Qless.queue(data['queue']) + + queue.throttled.remove(jid) + queue.work.add(now, data.priority, jid) + end + + local popped = self.pending.pop(0, available_locks - 1) end function QlessThrottle:available() @@ -2021,6 +2048,14 @@ end function QlessThrottle:ttl() return redis.call('ttl', QlessThrottle.ns .. self.id) end + +function QlessThrottle:locks_available() + if self.maximum == 0 then + return 10 + end + + return self.maximum - self.locks.length() +end local QlessAPI = {} function QlessAPI.get(now, jid) @@ -2236,6 +2271,10 @@ QlessAPI['throttle.locks'] = function(now, tid) return Qless.throttle(tid).locks.members() end +QlessAPI['throttle.pending'] = function(now, tid) + return Qless.throttle(tid).pending.members() +end + QlessAPI['throttle.ttl'] = function(now, tid) return Qless.throttle(tid):ttl() end From ce13508c4340d78a5ec90ab2116b32e5de273d0d Mon Sep 17 00:00:00 2001 From: Gregory Salmon Date: Thu, 17 Apr 2014 13:24:43 -0400 Subject: [PATCH 19/83] update throttle access for Qless::Queue --- lib/qless/queue.rb | 11 ++++++++--- spec/integration/queue_spec.rb | 4 ++++ spec/unit/queue_spec.rb | 8 ++++++++ 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/lib/qless/queue.rb b/lib/qless/queue.rb index b0998b68..f8102a8b 100644 --- a/lib/qless/queue.rb +++ b/lib/qless/queue.rb @@ -69,13 +69,18 @@ def heartbeat=(value) set_config :heartbeat, value end + def throttle + @throttle ||= Qless::Throttle.new("ql:q:#{name}", client) + end + def max_concurrency - value = JSON.parse(@client.call('queue.throttle.get', @name))['maximum'] - value && Integer(value) + warn "[DEPRECATED - 4/17/14] `max_concurrency` is deprecated. Use `throttle.maximum` instead." + throttle.maximum end def max_concurrency=(value) - @client.call('queue.throttle.set', @name, value) + warn "[DEPRECATED - 4/17/14] `max_concurrency=` is deprecated. Use `throttle.maximum=` instead." + throttle.maximum = value end def paused? 
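
The queue.rb hunk above replaces the direct 'queue.throttle.get'/'queue.throttle.set' calls behind max_concurrency with a Queue#throttle accessor backed by the new Qless::Throttle class. A minimal usage sketch, assuming a running qless client as introduced in these patches (the queue name and the value 5 are illustrative, not part of the patch):

    require 'qless'

    client = Qless::Client.new
    queue  = Qless::Queue.new('testing', client)

    # Preferred: concurrency is configured through the queue's throttle object,
    # whose id is "ql:q:testing" per the Throttle.new call in #throttle above.
    queue.throttle.maximum = 5
    queue.throttle.maximum      # => 5

    # Deprecated but still supported; both warn and delegate to the throttle:
    queue.max_concurrency = 5   # same as queue.throttle.maximum = 5
    queue.max_concurrency       # same as queue.throttle.maximum
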
diff --git a/spec/integration/queue_spec.rb b/spec/integration/queue_spec.rb index b86482ed..7d43ddeb 100644 --- a/spec/integration/queue_spec.rb +++ b/spec/integration/queue_spec.rb @@ -75,6 +75,10 @@ module Qless pending('this is specific to ruby') end + it 'exposes a throttle' do + expect(queue.throttle).to be + end + it 'exposes max concurrency' do queue.max_concurrency = 5 expect(queue.max_concurrency).to eq(5) diff --git a/spec/unit/queue_spec.rb b/spec/unit/queue_spec.rb index 1061f2d5..6d3807f4 100644 --- a/spec/unit/queue_spec.rb +++ b/spec/unit/queue_spec.rb @@ -94,6 +94,14 @@ def enqueue(q, klass, data, opts = {}) include_examples 'job options' end + describe "#throttle" do + let(:q) { Queue.new('a_queue', client) } + + it "returns a Qless::Throttle" do + expect(q.throttle).to be_a(Qless::Throttle) + end + end + describe "equality" do it 'is considered equal when the qless client and name are equal' do q1 = Qless::Queue.new('foo', client) From c96a70ad5882ff4346d66ee8691a3b0959b0015d Mon Sep 17 00:00:00 2001 From: Gregory Salmon Date: Tue, 22 Apr 2014 17:42:16 -0400 Subject: [PATCH 20/83] basic throttle management ui --- lib/qless.rb | 6 +++++ lib/qless/server.rb | 33 ++++++++++++++++++++++++++++ lib/qless/server/views/layout.erb | 28 +++++++++++++++++++++++ lib/qless/server/views/throttles.erb | 33 ++++++++++++++++++++++++++++ lib/qless/throttle.rb | 1 - spec/integration/server_spec.rb | 24 ++++++++++++++++++++ 6 files changed, 124 insertions(+), 1 deletion(-) create mode 100644 lib/qless/server/views/throttles.erb diff --git a/lib/qless.rb b/lib/qless.rb index 09da6631..970c4858 100644 --- a/lib/qless.rb +++ b/lib/qless.rb @@ -146,6 +146,12 @@ def initialize(client) def [](name) Throttle.new(name, @client) end + + def counts + @client.queues.counts.map do |queue| + Throttle.new(queue['name'], @client) + end + end end # A class for interacting with events. Not meant to be instantiated directly, diff --git a/lib/qless/server.rb b/lib/qless/server.rb index 38af8ba9..06562eae 100755 --- a/lib/qless/server.rb +++ b/lib/qless/server.rb @@ -78,6 +78,7 @@ def paginated(qless_object, method, *args) def tabs [ { name: 'Queues' , path: '/queues' }, + { name: 'Throttles', path: '/throttles'}, { name: 'Workers' , path: '/workers' }, { name: 'Track' , path: '/track' }, { name: 'Failed' , path: '/failed' }, @@ -95,6 +96,10 @@ def queues client.queues.counts end + def throttles + client.throttles.counts + end + def tracked client.jobs.tracked end @@ -194,6 +199,13 @@ def strftime(t) } end + get '/throttles/?' do + erb :throttles, layout: true, locals: { + title: 'Throttles' + #throttles: @client.throttles + } + end + get '/failed.json' do json(client.jobs.failed) end @@ -481,6 +493,27 @@ def strftime(t) end end + post '/delete_throttle' do + data = JSON.parse(request.body.read) + if data['id'].nil? + halt 400, 'Need throttle id' + else + throttle = Throttle.new(data['id'], client) + throttle.delete + return json({id: throttle.id, maximum: throttle.maximum}) + end + end + + post '/update_throttle' do + data = JSON.parse(request.body.read) + if data['id'].nil? || data['maximum'].nil? + halt 400, 'Need throttle id and maximum value' + else + throttle = Throttle.new(data['id'], client) + throttle.maximum = data['maximum'] + end + end + # start the server if ruby file executed directly run! 
if app_file == $PROGRAM_NAME end diff --git a/lib/qless/server/views/layout.erb b/lib/qless/server/views/layout.erb index 3375a8b6..d5c521a5 100644 --- a/lib/qless/server/views/layout.erb +++ b/lib/qless/server/views/layout.erb @@ -331,6 +331,34 @@ }) } + var delete_throttle = function(throttle_id) { + _ajax({ + url: '<%= u "/delete_throttle" %>', + data: { + 'id': throttle_id + }, success: function(data) { + flash('Deleted throttle for ' + throttle_id, 'success'); + $('.throttle-' + throttle_id).val(data['maximum']); + }, error: function(data) { + flash('Couldn\'t delete thottle ' + throttle_id); + } + }) + } + + var update_throttle = function(throttle_id, maximum) { + _ajax({ + url: '<%= u "/update_throttle" %>', + data: { + 'id': throttle_id, + 'maximum': maximum + }, success: function(data) { + flash('Updated throttle for ' + throttle_id, 'success'); + }, error: function(data) { + flash('Couldn\'t update throttle ' + throttle_id); + } + }) + } + $(document).ready(function() { $('button').tooltip({delay:200}); }); diff --git a/lib/qless/server/views/throttles.erb b/lib/qless/server/views/throttles.erb new file mode 100644 index 00000000..5b56744f --- /dev/null +++ b/lib/qless/server/views/throttles.erb @@ -0,0 +1,33 @@ +<% if client.throttles.counts.empty? %> + +<% else %> + + + <% client.throttles.counts.each do |throttle| %> +
    +
    +

    + <%= throttle.id %> +

    +
    +
    +
    +
    + +
    +
    +
    +
    +
    + +
    +
    +
    + <% end %> +<% end %> diff --git a/lib/qless/throttle.rb b/lib/qless/throttle.rb index 36aa67d8..37a00b87 100644 --- a/lib/qless/throttle.rb +++ b/lib/qless/throttle.rb @@ -53,6 +53,5 @@ def throttle_attrs def update(max, expiration = 0) @client.call('throttle.set', @name, max || maximum, expiration) end - end end diff --git a/spec/integration/server_spec.rb b/spec/integration/server_spec.rb index 2d7ee8dd..b0684467 100644 --- a/spec/integration/server_spec.rb +++ b/spec/integration/server_spec.rb @@ -122,6 +122,30 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) test_pagination end + it 'can set and delete throttles for all the queues', js: true do + q.put(Qless::Job, {}) + + Throttle.new('testing', client).maximum.should eq(0) + + visit '/throttles' + + first('h3', text: /testing/i).should be + first('.throttle-testing', placeholder: /0/i).should be + + maximum = first('.throttle-testing') + maximum.set(3) + maximum.trigger('blur'); + + first('.throttle-testing', value: /3/i).should be + + Throttle.new('testing', client).maximum.should eq(3) + + first('button.btn-danger').click + first('button.btn-danger').click + + Throttle.new('testing', client).maximum.should eq(0) + end + it 'can see the root-level summary' do visit '/' From f539337bcf20e6811ebcbe8ac5e065d712fd7a0a Mon Sep 17 00:00:00 2001 From: Gregory Salmon Date: Wed, 23 Apr 2014 10:34:07 -0400 Subject: [PATCH 21/83] DRY test --- spec/integration/server_spec.rb | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/spec/integration/server_spec.rb b/spec/integration/server_spec.rb index b0684467..7b29a0af 100644 --- a/spec/integration/server_spec.rb +++ b/spec/integration/server_spec.rb @@ -125,25 +125,28 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) it 'can set and delete throttles for all the queues', js: true do q.put(Qless::Job, {}) - Throttle.new('testing', client).maximum.should eq(0) + text_field_class = ".throttle-#{q.name}" + throttle = Throttle.new(q.name, client) + + throttle.maximum.should eq(0) visit '/throttles' first('h3', text: /testing/i).should be - first('.throttle-testing', placeholder: /0/i).should be + first(text_field_class, placeholder: /0/i).should be - maximum = first('.throttle-testing') + maximum = first(text_field_class) maximum.set(3) maximum.trigger('blur'); - first('.throttle-testing', value: /3/i).should be + first(text_field_class, value: /3/i).should be - Throttle.new('testing', client).maximum.should eq(3) + throttle.maximum.should eq(3) first('button.btn-danger').click first('button.btn-danger').click - Throttle.new('testing', client).maximum.should eq(0) + throttle.maximum.should eq(0) end it 'can see the root-level summary' do From 9cdbc2ce823189317f8139eb11b2cadf8d622eb7 Mon Sep 17 00:00:00 2001 From: Gregory Salmon Date: Wed, 23 Apr 2014 13:34:54 -0400 Subject: [PATCH 22/83] additional comments --- lib/qless/server.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/qless/server.rb b/lib/qless/server.rb index 06562eae..66ac28a0 100755 --- a/lib/qless/server.rb +++ b/lib/qless/server.rb @@ -494,6 +494,7 @@ def strftime(t) end post '/delete_throttle' do + # Expects a JSON object: {'id': id} data = JSON.parse(request.body.read) if data['id'].nil? halt 400, 'Need throttle id' @@ -505,6 +506,7 @@ def strftime(t) end post '/update_throttle' do + # Expects a JSON object: {'id': id, 'maximum': maximum} data = JSON.parse(request.body.read) if data['id'].nil? || data['maximum'].nil? 
halt 400, 'Need throttle id and maximum value' From 021615530d15b39df7a5da7e44c8ef92281ddccb Mon Sep 17 00:00:00 2001 From: Gregory Salmon Date: Wed, 23 Apr 2014 13:36:23 -0400 Subject: [PATCH 23/83] update throttle erb --- lib/qless/server/views/throttles.erb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/qless/server/views/throttles.erb b/lib/qless/server/views/throttles.erb index 5b56744f..85ed77c5 100644 --- a/lib/qless/server/views/throttles.erb +++ b/lib/qless/server/views/throttles.erb @@ -1,4 +1,4 @@ -<% if client.throttles.counts.empty? %> +<% if throttles.empty? %> @@ -7,7 +7,7 @@

    Throttles By queue.

    - <% client.throttles.counts.each do |throttle| %> + <% throttles.each do |throttle| %>

    From 73df52d918a56090cedec51994fb1a8935e3a9cc Mon Sep 17 00:00:00 2001 From: Gregory Salmon Date: Wed, 23 Apr 2014 13:38:53 -0400 Subject: [PATCH 24/83] remove comment --- lib/qless/server.rb | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/qless/server.rb b/lib/qless/server.rb index 66ac28a0..7f415878 100755 --- a/lib/qless/server.rb +++ b/lib/qless/server.rb @@ -202,7 +202,6 @@ def strftime(t) get '/throttles/?' do erb :throttles, layout: true, locals: { title: 'Throttles' - #throttles: @client.throttles } end From 7037cdb79f9d80db0b3bf199bf585036fd1dfd65 Mon Sep 17 00:00:00 2001 From: Gregory Salmon Date: Fri, 25 Apr 2014 11:34:58 -0400 Subject: [PATCH 25/83] update throttle endpoints --- lib/qless/server.rb | 46 +++++++++++++++---------------- lib/qless/server/views/layout.erb | 19 ++++++++----- spec/integration/server_spec.rb | 5 ++-- 3 files changed, 37 insertions(+), 33 deletions(-) diff --git a/lib/qless/server.rb b/lib/qless/server.rb index 7f415878..e4ca2b07 100755 --- a/lib/qless/server.rb +++ b/lib/qless/server.rb @@ -205,6 +205,29 @@ def strftime(t) } end + post '/throttle' do + # Expects a JSON object: {'id': id, 'maximum': maximum} + data = JSON.parse(request.body.read) + if data['id'].nil? || data['maximum'].nil? + halt 400, 'Need throttle id and maximum value' + else + throttle = Throttle.new(data['id'], client) + throttle.maximum = data['maximum'] + end + end + + delete '/throttle' do + # Expects a JSON object: {'id': id} + data = JSON.parse(request.body.read) + if data['id'].nil? + halt 400, 'Need throttle id' + else + throttle = Throttle.new(data['id'], client) + throttle.delete + return json({id: throttle.id, maximum: throttle.maximum}) + end + end + get '/failed.json' do json(client.jobs.failed) end @@ -492,29 +515,6 @@ def strftime(t) end end - post '/delete_throttle' do - # Expects a JSON object: {'id': id} - data = JSON.parse(request.body.read) - if data['id'].nil? - halt 400, 'Need throttle id' - else - throttle = Throttle.new(data['id'], client) - throttle.delete - return json({id: throttle.id, maximum: throttle.maximum}) - end - end - - post '/update_throttle' do - # Expects a JSON object: {'id': id, 'maximum': maximum} - data = JSON.parse(request.body.read) - if data['id'].nil? || data['maximum'].nil? - halt 400, 'Need throttle id and maximum value' - else - throttle = Throttle.new(data['id'], client) - throttle.maximum = data['maximum'] - end - end - # start the server if ruby file executed directly run! 
if app_file == $PROGRAM_NAME end diff --git a/lib/qless/server/views/layout.erb b/lib/qless/server/views/layout.erb index d5c521a5..88dae9d4 100644 --- a/lib/qless/server/views/layout.erb +++ b/lib/qless/server/views/layout.erb @@ -332,14 +332,19 @@ } var delete_throttle = function(throttle_id) { - _ajax({ - url: '<%= u "/delete_throttle" %>', - data: { - 'id': throttle_id - }, success: function(data) { + var data = { 'id': throttle_id }; + + $.ajax({ + url: '<%= u "/throttle" %>', + type: 'DELETE', + dataType: 'json', + processData: false, + data: JSON.stringify(data), + success: function(data) { flash('Deleted throttle for ' + throttle_id, 'success'); $('.throttle-' + throttle_id).val(data['maximum']); - }, error: function(data) { + }, + error: function(data) { flash('Couldn\'t delete thottle ' + throttle_id); } }) @@ -347,7 +352,7 @@ var update_throttle = function(throttle_id, maximum) { _ajax({ - url: '<%= u "/update_throttle" %>', + url: '<%= u "/throttle" %>', data: { 'id': throttle_id, 'maximum': maximum diff --git a/spec/integration/server_spec.rb b/spec/integration/server_spec.rb index 7b29a0af..42b97e23 100644 --- a/spec/integration/server_spec.rb +++ b/spec/integration/server_spec.rb @@ -140,13 +140,12 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) maximum.trigger('blur'); first(text_field_class, value: /3/i).should be - throttle.maximum.should eq(3) first('button.btn-danger').click first('button.btn-danger').click - - throttle.maximum.should eq(0) + + first(text_field_class, value: /0/i).should be end it 'can see the root-level summary' do From 4d5a659a6e24784b72fe2683a1c7ceee0c1ac604 Mon Sep 17 00:00:00 2001 From: Gregory Salmon Date: Fri, 25 Apr 2014 13:16:06 -0400 Subject: [PATCH 26/83] add pry to gemspec --- Gemfile.lock | 7 +++++++ qless.gemspec | 1 + spec/integration/server_spec.rb | 1 + 3 files changed, 9 insertions(+) diff --git a/Gemfile.lock b/Gemfile.lock index e1d7bdf0..70981f56 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -20,6 +20,7 @@ GEM xpath (~> 0.1.4) childprocess (0.3.9) ffi (~> 1.0, >= 1.0.11) + coderay (1.1.0) columnize (0.3.6) daemons (1.1.9) debugger (1.6.2) @@ -40,6 +41,7 @@ GEM http_parser.rb (0.5.3) launchy (2.1.2) addressable (~> 2.3) + method_source (0.8.2) metriks (0.9.9.5) atomic (~> 1.0) avl_tree (~> 1.1.2) @@ -60,6 +62,10 @@ GEM http_parser.rb (~> 0.5.3) multi_json (~> 1.0) powerpack (0.0.8) + pry (0.9.12.6) + coderay (~> 1.0) + method_source (~> 0.8) + slop (~> 3.4) rack (1.5.2) rack-protection (1.5.0) rack @@ -125,6 +131,7 @@ DEPENDENCIES launchy (~> 2.1.0) metriks (~> 0.9) poltergeist (~> 1.0.0) + pry qless! rake (~> 10.0) rspec (~> 2.12) diff --git a/qless.gemspec b/qless.gemspec index f9b13469..c423b2b6 100644 --- a/qless.gemspec +++ b/qless.gemspec @@ -52,4 +52,5 @@ language-specific extension will also remain up to date. 
s.add_development_dependency 'rubocop' , '~> 0.13.1' s.add_development_dependency 'rusage' , '~> 0.2.0' s.add_development_dependency 'timecop' , '~> 0.7.1' + s.add_development_dependency 'pry' end diff --git a/spec/integration/server_spec.rb b/spec/integration/server_spec.rb index 42b97e23..af3e3dca 100644 --- a/spec/integration/server_spec.rb +++ b/spec/integration/server_spec.rb @@ -8,6 +8,7 @@ require 'capybara/rspec' require 'capybara/poltergeist' require 'rack/test' +require 'pry' Capybara.javascript_driver = :poltergeist From 7fb8028baa6319773702a60050d889bb6cf97c05 Mon Sep 17 00:00:00 2001 From: Gregory Salmon Date: Fri, 25 Apr 2014 13:16:57 -0400 Subject: [PATCH 27/83] can set expiration of throttles from ui --- lib/qless/server.rb | 11 ++++++ lib/qless/server/views/layout.erb | 20 ++++++++++- lib/qless/server/views/throttles.erb | 51 +++++++++++++++------------- spec/integration/server_spec.rb | 36 ++++++++++++++++++-- 4 files changed, 92 insertions(+), 26 deletions(-) diff --git a/lib/qless/server.rb b/lib/qless/server.rb index e4ca2b07..cb4c87e6 100755 --- a/lib/qless/server.rb +++ b/lib/qless/server.rb @@ -215,6 +215,17 @@ def strftime(t) throttle.maximum = data['maximum'] end end + + put '/throttle' do + # Expects a JSON object: {'id': id, 'expiration': expiration} + data = JSON.parse(request.body.read) + if data['id'].nil? || data['expiration'].nil? + halt 400, 'Need throttle id and expiration value' + else + throttle = Throttle.new(data['id'], client) + throttle.expiration = data['expiration'] + end + end delete '/throttle' do # Expects a JSON object: {'id': id} diff --git a/lib/qless/server/views/layout.erb b/lib/qless/server/views/layout.erb index 88dae9d4..eab48dd5 100644 --- a/lib/qless/server/views/layout.erb +++ b/lib/qless/server/views/layout.erb @@ -331,6 +331,24 @@ }) } + var expire_throttle = function(throttle_id, expiration) { + var data = { 'id': throttle_id, 'expiration': expiration }; + + $.ajax({ + url: '<%= u "/throttle" %>', + type: 'PUT', + dataType: 'json', + processData: false, + data: JSON.stringify(data), + success: function(data) { + flash('Set expiration for throttle ' + throttle_id, 'success'); + }, + error: function(data) { + flash('Couldn\'t update expiration for throttle ' + throttle_id); + } + }) + } + var delete_throttle = function(throttle_id) { var data = { 'id': throttle_id }; @@ -342,7 +360,7 @@ data: JSON.stringify(data), success: function(data) { flash('Deleted throttle for ' + throttle_id, 'success'); - $('.throttle-' + throttle_id).val(data['maximum']); + $('.' + throttle_id + '-maximum').val(data['maximum']); }, error: function(data) { flash('Couldn\'t delete thottle ' + throttle_id); diff --git a/lib/qless/server/views/throttles.erb b/lib/qless/server/views/throttles.erb index 85ed77c5..c4f297b9 100644 --- a/lib/qless/server/views/throttles.erb +++ b/lib/qless/server/views/throttles.erb @@ -7,27 +7,32 @@

    Throttles By queue.

    - <% throttles.each do |throttle| %> -
    -
    -

    - <%= throttle.id %> -

    -
    -
    -
    -
    - -
    -
    -
    -
    -
    - -
    -
    -
    - <% end %> + + + + + + + + + + + <% throttles.each do |throttle| %> + + + + + + + <% end %> + +
    Queue Maximum TTL (sets expiration) Delete
    <%= throttle.id %> + + + + + +
    <% end %> diff --git a/spec/integration/server_spec.rb b/spec/integration/server_spec.rb index af3e3dca..62dd5a2f 100644 --- a/spec/integration/server_spec.rb +++ b/spec/integration/server_spec.rb @@ -126,14 +126,14 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) it 'can set and delete throttles for all the queues', js: true do q.put(Qless::Job, {}) - text_field_class = ".throttle-#{q.name}" + text_field_class = ".#{q.name}-maximum" throttle = Throttle.new(q.name, client) throttle.maximum.should eq(0) visit '/throttles' - first('h3', text: /testing/i).should be + first('td', text: /#{q.name}/i).should be first(text_field_class, placeholder: /0/i).should be maximum = first(text_field_class) @@ -149,6 +149,38 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) first(text_field_class, value: /0/i).should be end + it 'can set the expiration for throttles', js: true do + q.put(Qless::Job, {}) + + maximum_field_class = ".#{q.name}-maximum" + expiration_field_class = ".#{q.name}-expiration" + throttle = Throttle.new(q.name, client) + + throttle.maximum.should eq(0) + throttle.ttl.should eq(-2) + + visit '/throttles' + + first('td', text: /#{q.name}/i).should be + first(expiration_field_class, placeholder: /-2/i).should be + + maximum = first(maximum_field_class) + maximum.set(3) + maximum.trigger('blur'); + + first(maximum_field_class, value: /3/i).should be + throttle.maximum.should eq(3) + + expiration = first(expiration_field_class) + expiration.set(1) + expiration.trigger('blur'); + + visit '/throttles' + + first(maximum_field_class, value: /0/i).should be + first(expiration_field_class, placeholder: /-2/i).should be + end + it 'can see the root-level summary' do visit '/' From da391b4a6a65f9df7af76652a47e4fc16c39d147 Mon Sep 17 00:00:00 2001 From: Gregory Salmon Date: Fri, 25 Apr 2014 13:21:39 -0400 Subject: [PATCH 28/83] change 'Delete' to 'Reset' --- lib/qless/server/views/throttles.erb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/qless/server/views/throttles.erb b/lib/qless/server/views/throttles.erb index c4f297b9..7f0a2239 100644 --- a/lib/qless/server/views/throttles.erb +++ b/lib/qless/server/views/throttles.erb @@ -13,7 +13,7 @@ Queue Maximum TTL (sets expiration) - Delete + Reset From f2578bcdbe42d1c530386b92872c7105e260ef21 Mon Sep 17 00:00:00 2001 From: Gregory Salmon Date: Tue, 29 Apr 2014 17:26:18 -0400 Subject: [PATCH 29/83] add ui for job throttles --- lib/qless/job.rb | 4 ++ lib/qless/server/views/_job.erb | 32 +++++++++++-- lib/qless/server/views/throttles.erb | 2 +- spec/integration/server_spec.rb | 72 +++++++++++++++++++++++++--- 4 files changed, 98 insertions(+), 12 deletions(-) diff --git a/lib/qless/job.rb b/lib/qless/job.rb index 40a14712..ce4c0325 100644 --- a/lib/qless/job.rb +++ b/lib/qless/job.rb @@ -167,6 +167,10 @@ def ttl @expires_at - Time.now.to_f end + def throttle_objects + throttles.map { |name| Throttle.new(name, client) } + end + def reconnect_to_redis @client.redis.client.reconnect end diff --git a/lib/qless/server/views/_job.erb b/lib/qless/server/views/_job.erb index c99c93bf..59c21dd6 100644 --- a/lib/qless/server/views/_job.erb +++ b/lib/qless/server/views/_job.erb @@ -18,7 +18,7 @@
    <% if (job.state != "complete") %> - + <% end %> <% if (job.state == "running") %> @@ -102,7 +102,7 @@ -
    +
    @@ -152,7 +152,29 @@ - <% end %> + <% end %> + + <% job.throttle_objects.each do |throttle| %> +
    +
    +

    <%= throttle.id %>

    +
    +
    + +
    +
    + +
    +
    + +
    +
    +

    ( maximum / TTL / reset )

    +
    +
    + <% end %>
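
The job.throttle_objects.each block just added renders one row per throttle attached to a job, using the Job#throttle_objects helper this patch adds to lib/qless/job.rb (it maps each throttle name stored on the job to a Qless::Throttle). A rough sketch of the same lookup outside the template, assuming a client and queue as in the earlier sketch; the 'wakka' throttle name mirrors the integration spec in this patch:

    jid = queue.put(Qless::Job, {}, throttles: ['wakka'])
    job = client.jobs[jid]

    job.throttles                  # => ["wakka"]  (names stored on the job)
    job.throttle_objects.each do |throttle|
      # Each entry is a Qless::Throttle; the template shows its maximum and
      # TTL fields plus a reset button for every such row.
      puts "#{throttle.id}: maximum=#{throttle.maximum}, ttl=#{throttle.ttl}"
    end
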
    @@ -202,7 +224,7 @@ -
    +
    <% job.tags.each do |tag| %>
    @@ -231,7 +253,7 @@
    <% end %> -
    +
    <% end %> diff --git a/lib/qless/server/views/throttles.erb b/lib/qless/server/views/throttles.erb index 7f0a2239..c3232cd2 100644 --- a/lib/qless/server/views/throttles.erb +++ b/lib/qless/server/views/throttles.erb @@ -4,7 +4,7 @@ <% else %> diff --git a/spec/integration/server_spec.rb b/spec/integration/server_spec.rb index 62dd5a2f..331eb097 100644 --- a/spec/integration/server_spec.rb +++ b/spec/integration/server_spec.rb @@ -123,7 +123,7 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) test_pagination end - it 'can set and delete throttles for all the queues', js: true do + it 'can set and delete queues throttles', js: true do q.put(Qless::Job, {}) text_field_class = ".#{q.name}-maximum" @@ -138,7 +138,7 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) maximum = first(text_field_class) maximum.set(3) - maximum.trigger('blur'); + maximum.trigger('blur') first(text_field_class, value: /3/i).should be throttle.maximum.should eq(3) @@ -149,7 +149,7 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) first(text_field_class, value: /0/i).should be end - it 'can set the expiration for throttles', js: true do + it 'can set the expiration for queue throttles', js: true do q.put(Qless::Job, {}) maximum_field_class = ".#{q.name}-maximum" @@ -166,20 +166,80 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) maximum = first(maximum_field_class) maximum.set(3) - maximum.trigger('blur'); + maximum.trigger('blur') first(maximum_field_class, value: /3/i).should be throttle.maximum.should eq(3) expiration = first(expiration_field_class) expiration.set(1) - expiration.trigger('blur'); + expiration.trigger('blur') visit '/throttles' first(maximum_field_class, value: /0/i).should be first(expiration_field_class, placeholder: /-2/i).should be end + + it 'can set and delete job throttles', js: true do + t_id = 'wakka' # the throttle id + jid = q.put(Qless::Job, {}, throttles: [t_id]) + + text_field_class = ".#{t_id}-maximum" + throttle = Throttle.new(t_id, client) + + throttle.maximum.should eq(0) + + visit "/jobs/#{jid}" + + page.should have_content(t_id) + first(".#{t_id}-maximum", placeholder: /0/i).should be + + maximum = first(".#{t_id}-maximum") + maximum.set(3) + maximum.trigger('blur') + + first(".#{t_id}-maximum", value: /3/i).should be + throttle.maximum.should eq(3) + + first('button.btn-danger.remove-throttle').click + first('button.btn-danger.remove-throttle').click + + first(".#{t_id}-maximum", value: /0/i).should be + end + + it 'can set the expiration for job throttles', js: true do + t_id = 'wakka' # the throttle id + jid = q.put(Qless::Job, {}, throttles: [t_id]) + + maximum_field_class = ".#{t_id}-maximum" + expiration_field_class = ".#{t_id}-expiration" + throttle = Throttle.new(t_id, client) + + throttle.maximum.should eq(0) + throttle.ttl.should eq(-2) + + visit "/jobs/#{jid}" + + page.should have_content(t_id) + first(".#{t_id}-expiration", placeholder: /-2/i).should be + + maximum = first(".#{t_id}-maximum") + maximum.set(3) + maximum.trigger('blur') + + first(".#{t_id}-maximum", value: /3/i).should be + throttle.maximum.should eq(3) + + expiration = first(".#{t_id}-expiration") + expiration.set(1) + expiration.trigger('blur') + + visit "/jobs/#{jid}" + + first(".#{t_id}-maximum", value: /0/i).should be + first(".#{t_id}-expiration", placeholder: /-2/i).should be + end it 'can see the root-level summary' do visit '/' @@ -329,7 +389,7 @@ def test_pagination(page_1_jid = 1, page_2_jid = 27) job.move('testing') q.pop.complete visit "/jobs/#{job.jid}" - 
first('i.icon-remove').should be_nil + first('i.icon-remove.cancel-job').should be_nil first('i.icon-repeat').should be_nil first('i.icon-flag').should be first('i.caret').should be From ddc9752bbea57de1ad013793d2089faa9a234e46 Mon Sep 17 00:00:00 2001 From: Gregory Salmon Date: Wed, 30 Apr 2014 11:10:49 -0400 Subject: [PATCH 30/83] fix spacing --- lib/qless/server/views/_job.erb | 428 ++++++++++++++++---------------- 1 file changed, 214 insertions(+), 214 deletions(-) diff --git a/lib/qless/server/views/_job.erb b/lib/qless/server/views/_job.erb index 59c21dd6..a72c89fa 100644 --- a/lib/qless/server/views/_job.erb +++ b/lib/qless/server/views/_job.erb @@ -1,157 +1,157 @@ <% if job.instance_of?(Qless::Job) %> -
    -
    -
    -
    -

    - "><%= job.jid[0..8] %>... | <%= job.klass_name %> -

    -
    -
    -

    - - | <%= job.state %> / "><%= job.queue_name %><%= job.worker_name.nil? ? "/ #{job.worker_name}" : "" %> - -

    -
    -
    -
    -
    - <% if (job.state != "complete") %> - - <% end %> - <% if (job.state == "running") %> - - <% end %> - - <% if (job.state == 'failed') %> - - <% end %> - - -
    -
    -
    -
    - - - -
    -
    -
    -
    +
    +
    +
    +
    +

    + "><%= job.jid[0..8] %>... | <%= job.klass_name %> +

    +
    +
    +

    + + | <%= job.state %> / "><%= job.queue_name %><%= job.worker_name.nil? ? "/ #{job.worker_name}" : "" %> + +

    +
    +
    +
    +
    + <% if (job.state != "complete") %> + + <% end %> + <% if (job.state == "running") %> + + <% end %> + + <% if (job.state == 'failed') %> + + <% end %> + + +
    +
    +
    +
    + + + +
    +
    +
    +
    - <% if not job.dependencies.empty? %> -
    -
    -

    Dependencies:

    - <% job.dependencies.each do |jid| %> -
    "> - - -
    - <% end %> -
    -
    - <% end %> + <% if not job.dependencies.empty? %> +
    +
    +

    Dependencies:

    + <% job.dependencies.each do |jid| %> +
    "> + + +
    + <% end %> +
    +
    + <% end %> - <% if not job.dependents.empty? %> -
    -
    -

    Dependents:

    - <% job.dependents.each do |jid| %> -
    "> - - -
    - <% end %> -
    -
    - <% end %> + <% if not job.dependents.empty? %> +
    +
    +

    Dependents:

    + <% job.dependents.each do |jid| %> +
    "> + + +
    + <% end %> +
    +
    + <% end %> -
    -
    - <% job.tags.each do |tag| %> -
    - <%= tag %> - -
    - <% end %> +
    +
    + <% job.tags.each do |tag| %> +
    + <%= tag %> + +
    + <% end %> - -
    - - + +
    + +
    -
    -
    +
    +
    - <% if not defined? brief %> -
    -
    -

    Data

    -
    <%= JSON.pretty_generate(job.data) %>
    -
    -
    -

    History

    -
    - <% job.queue_history.reverse.each do |h| %> - <% if h['what'] == 'put' %> -
    <%= h['what'] %> at <%= strftime(h['when']) %>
    +      <% if not defined? brief %>
    +      
    +
    +

    Data

    +
    <%= JSON.pretty_generate(job.data) %>
    +
    +
    +

    History

    +
    + <% job.queue_history.reverse.each do |h| %> + <% if h['what'] == 'put' %> +
    <%= h['what'] %> at <%= strftime(h['when']) %>
         in queue <%= h['q'] %>
    - <% elsif h['what'] == 'popped' %> -
    <%= h['what'] %> at <%= strftime(h['when']) %>
    +            <% elsif h['what'] == 'popped' %>
    +              
    <%= h['what'] %> at <%= strftime(h['when']) %>
         by <%= h['worker'] %>
    - <% elsif h['what'] == 'done' %> -
    completed at <%= strftime(h['when']) %>
    - <% elsif h['what'] == 'failed' %> - <% if h['worker'] %> -
    <%= h['what'] %> at <%= strftime(h['when']) %>
    +            <% elsif h['what'] == 'done' %>
    +              
    completed at <%= strftime(h['when']) %>
    + <% elsif h['what'] == 'failed' %> + <% if h['worker'] %> +
    <%= h['what'] %> at <%= strftime(h['when']) %>
         by <%= h['worker'] %>
         in group <%= h['group'] %>
    - <% else %> -
    <%= h['what'] %> at <%= strftime(h['when']) %>
    +              <% else %>
    +                
    <%= h['what'] %> at <%= strftime(h['when']) %>
         in group <%= h['group'] %>
    - <% end %> - <% else %> -
    <%= h['what'] %> at <%= strftime(h['when']) %>
    - <% end %> - <% end %> -
    -
    -
    - <% end %> + <% end %> + <% else %> +
    <%= h['what'] %> at <%= strftime(h['when']) %>
    + <% end %> + <% end %> +
    +
    +
    + <% end %> - <% if job.failure.length > 0 %> -
    -
    -
    -

    In <%= job.queue_name %> on <%= job.failure['worker'] %> - about <%= strftime(Time.at(job.failure['when'])) %>

    -
    <%= job.failure['message'].gsub('>', '>').gsub('<', '<') %>
    -
    -
    -
    + <% if job.failure.length > 0 %> +
    +
    +
    +

    In <%= job.queue_name %> on <%= job.failure['worker'] %> + about <%= strftime(Time.at(job.failure['when'])) %>

    +
    <%= job.failure['message'].gsub('>', '>').gsub('<', '<') %>
    +
    +
    +
    <% end %> <% job.throttle_objects.each do |throttle| %> @@ -175,85 +175,85 @@
    <% end %> -
    -
    -
    +
    +
    + <% else # Recurring job %> -
    -
    -
    -
    -

    - "><%= job.jid[0..8] %>... | <%= job.klass_name %> -

    -
    -
    -

    - - | recurring / "><%= job.queue_name %> - -

    -
    -
    -
    -
    - - - -
    -
    -
    -
    - - + + +
    +
    +
    +
    + + - -
    -
    -
    -
    + +
    +
    + +
    -
    - <% job.tags.each do |tag| %> -
    - <%= tag %> - -
    - <% end %> +
    + <% job.tags.each do |tag| %> +
    + <%= tag %> + +
    + <% end %> - -
    - - -
    -
    -
    + +
    + + +
    +
    + - <% if not defined? brief %> -
    -
    -

    Data

    -
    <%= JSON.pretty_generate(job.data) %>
    -
    -
    - <% end %> + <% if not defined? brief %> +
    +
    +

    Data

    +
    <%= JSON.pretty_generate(job.data) %>
    +
    +
    + <% end %>
    - - + + <% end %> From 89de25059e50252034b436e0f6255cd62706be40 Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Mon, 12 May 2014 09:49:24 -0400 Subject: [PATCH 31/83] updated qless lua scripts --- lib/qless/lua/qless-lib.lua | 8 ++++++-- lib/qless/lua/qless.lua | 6 ++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua index f2404a9b..03799ad0 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: a9c3b988a5e3150a5d01b698d882bdbf8b264c42 +-- Current SHA: 3108245a22bf30415f9f3db85059d238ef35c4b0 -- This is a generated file ------------------------------------------------------------------------------- -- Forward declarations to make everything happy @@ -2695,7 +2695,11 @@ end -- Releases the lock taken by the specified jid. -- number of jobs released back into the queues is determined by the locks_available method. function QlessThrottle:release(now, jid) - self.locks.remove(jid) + -- Only attempt to remove from the pending set if the job wasn't found in the + -- locks set + if self.locks.remove(jid) == 0 then + self.pending.remove(jid) + end local available_locks = self:locks_available() if self.pending.length() == 0 or available_locks < 1 then diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua index 1beb1531..88c85042 100644 --- a/lib/qless/lua/qless.lua +++ b/lib/qless/lua/qless.lua @@ -1,4 +1,4 @@ --- Current SHA: a9c3b988a5e3150a5d01b698d882bdbf8b264c42 +-- Current SHA: 3108245a22bf30415f9f3db85059d238ef35c4b0 -- This is a generated file local Qless = { ns = 'ql:' @@ -2022,7 +2022,9 @@ function QlessThrottle:pend(now, jid) end function QlessThrottle:release(now, jid) - self.locks.remove(jid) + if self.locks.remove(jid) == 0 then + self.pending.remove(jid) + end local available_locks = self:locks_available() if self.pending.length() == 0 or available_locks < 1 then From e5a9865c3481c1298736a9950783064f44282e6c Mon Sep 17 00:00:00 2001 From: Gregory Salmon Date: Mon, 12 May 2014 17:23:21 -0400 Subject: [PATCH 32/83] fix queue throttle ui --- lib/qless.rb | 2 +- lib/qless/server/views/throttles.erb | 4 ++-- spec/integration/server_spec.rb | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/qless.rb b/lib/qless.rb index 970c4858..548b5c89 100644 --- a/lib/qless.rb +++ b/lib/qless.rb @@ -149,7 +149,7 @@ def [](name) def counts @client.queues.counts.map do |queue| - Throttle.new(queue['name'], @client) + Throttle.new("ql:q:#{queue['name']}", @client) end end end diff --git a/lib/qless/server/views/throttles.erb b/lib/qless/server/views/throttles.erb index c3232cd2..661f4bc8 100644 --- a/lib/qless/server/views/throttles.erb +++ b/lib/qless/server/views/throttles.erb @@ -21,10 +21,10 @@
    <%= throttle.id %> - + - +
    <%= throttle.id %> - + - + + <% end %> <% if (job.state == "running") %> @@ -59,7 +59,7 @@

    Dependencies:

    <% job.dependencies.each do |jid| %>
    "> - + @@ -75,7 +75,7 @@

    Dependents:

    <% job.dependents.each do |jid| %>
    "> - + @@ -95,7 +95,7 @@
    <% end %> - +
    @@ -184,13 +184,13 @@

    - "><%= job.jid[0..8] %>... | <%= job.klass_name %> + " title="<%= job.jid %>"><%= job.jid[0..8] %>... | <%= job.klass_name %>

    @@ -223,7 +223,7 @@
    - +
    <% job.tags.each do |tag| %> @@ -234,7 +234,7 @@
    <% end %> - +
    From dd87a47b33cec5c609d04dd7b12e9e272ad8eddf Mon Sep 17 00:00:00 2001 From: Myron Marston Date: Wed, 19 Mar 2014 14:14:41 -0700 Subject: [PATCH 52/83] Fix brittle test. --- spec/integration/subscriber_spec.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/integration/subscriber_spec.rb b/spec/integration/subscriber_spec.rb index 7dca2d53..602fa826 100644 --- a/spec/integration/subscriber_spec.rb +++ b/spec/integration/subscriber_spec.rb @@ -9,7 +9,7 @@ module Qless describe Subscriber, :integration, :uses_threads do - let(:channel) { SecureRandom.uuid } # use a unique channel + let(:channel) { Qless.generate_jid } # use a unique channel let(:logger) { StringIO.new } def publish(message) From 0c68724adbc59adf61bb3c9bfb1a1c2cb289e3a2 Mon Sep 17 00:00:00 2001 From: Myron Marston Date: Wed, 19 Mar 2014 15:45:55 -0700 Subject: [PATCH 53/83] Use something more intention revealing. --- spec/integration/subscriber_spec.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/integration/subscriber_spec.rb b/spec/integration/subscriber_spec.rb index 602fa826..7dca2d53 100644 --- a/spec/integration/subscriber_spec.rb +++ b/spec/integration/subscriber_spec.rb @@ -9,7 +9,7 @@ module Qless describe Subscriber, :integration, :uses_threads do - let(:channel) { Qless.generate_jid } # use a unique channel + let(:channel) { SecureRandom.uuid } # use a unique channel let(:logger) { StringIO.new } def publish(message) From 3ca9a66a3ee82d2a469355e11daf4792ad2ee170 Mon Sep 17 00:00:00 2001 From: Myron Marston Date: Thu, 17 Apr 2014 13:47:33 -0700 Subject: [PATCH 54/83] Improve RetryExceptions middleware to support exception-dependent backoffs. --- lib/qless/middleware/retry_exceptions.rb | 30 ++++++++---------------- 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/lib/qless/middleware/retry_exceptions.rb b/lib/qless/middleware/retry_exceptions.rb index 2839551c..d97b69eb 100644 --- a/lib/qless/middleware/retry_exceptions.rb +++ b/lib/qless/middleware/retry_exceptions.rb @@ -17,8 +17,6 @@ def around_perform(job) attempt_num = (job.original_retries - job.retries_left) + 1 failure = Qless.failure_formatter.format(job, error) job.retry(backoff_strategy.call(attempt_num, error), *failure) - - on_retry_callback.call(error, job) end def retryable_exception_classes @@ -39,27 +37,19 @@ def backoff_strategy @backoff_strategy ||= NO_BACKOFF_STRATEGY end - DEFAULT_ON_RETRY_CALLBACK = lambda { |error, job| } - def use_on_retry_callback(&block) - @on_retry_callback = block if block - end + def exponential(base, options = {}) + fuzz_factor = options.fetch(:fuzz_factor, 0) - def on_retry_callback - @on_retry_callback ||= DEFAULT_ON_RETRY_CALLBACK - end + lambda do |num, _error| + unfuzzed = base**num - # If `factor` is omitted it is set to `delay_seconds` to reproduce legacy - # behavior. - def exponential(delay_seconds, options={}) - factor = options.fetch(:factor, delay_seconds) - fuzz_factor = options.fetch(:fuzz_factor, 0) + fuzz = 0 + unless fuzz_factor.zero? + max_fuzz = unfuzzed * fuzz_factor + fuzz = rand(max_fuzz) * [1, -1].sample + end - lambda do |retry_no, error| - unfuzzed = delay_seconds * factor**(retry_no - 1) - return unfuzzed if fuzz_factor.zero? 
- r = 2 * rand - 1 - # r is uniformly distributed in range [-1, 1] - unfuzzed * (1 + fuzz_factor * r) + unfuzzed + fuzz end end end From ec963e39af67fc160a2d91013d15e6a25004bd9b Mon Sep 17 00:00:00 2001 From: Myron Marston Date: Thu, 12 Jun 2014 13:05:33 -0700 Subject: [PATCH 55/83] Rename `move` to `dequeue`. --- lib/qless/job.rb | 18 +- lib/qless/middleware/requeue_exceptions.rb | 41 +-- .../middleware/requeue_exceptions_spec.rb | 317 ++++++------------ 3 files changed, 116 insertions(+), 260 deletions(-) diff --git a/lib/qless/job.rb b/lib/qless/job.rb index aff945a9..9d68d0fb 100644 --- a/lib/qless/job.rb +++ b/lib/qless/job.rb @@ -44,7 +44,7 @@ class Job < BaseJob attr_reader :klass_name, :tracked, :dependencies, :dependents attr_reader :original_retries, :retries_left, :raw_queue_history attr_reader :state_changed - attr_accessor :data, :priority, :tags, :throttles + attr_accessor :data, :priority, :tags alias_method(:state_changed?, :state_changed) MiddlewareMisconfiguredError = Class.new(StandardError) @@ -63,9 +63,6 @@ def perform return fail("#{queue_name}-NameError", "Cannot find #{klass_name}") end - # log a real process executing job -- before we start processing - log("started by pid:#{Process.pid}") - middlewares = Job.middlewares_on(klass) if middlewares.last == SupportsMiddleware @@ -78,7 +75,7 @@ def perform elsif !klass.respond_to?(:perform) # If the klass doesn't have a :perform method, we should raise an error fail("#{queue_name}-method-missing", - "#{klass_name} has no perform method") + "#{klass_name} has no peform method") else klass.perform(self) end @@ -102,8 +99,7 @@ def self.build(client, klass, attributes = {}) 'failure' => {}, 'history' => [], 'dependencies' => [], - 'dependents' => [], - 'throttles' => [], + 'dependents' => [] } attributes = defaults.merge(Qless.stringify_hash_keys(attributes)) attributes['data'] = JSON.dump(attributes['data']) @@ -120,7 +116,7 @@ def self.middlewares_on(job_klass) def initialize(client, atts) super(client, atts.fetch('jid')) %w{jid data priority tags state tracked - failure dependencies dependents throttles spawned_from_jid}.each do |att| + failure dependencies dependents spawned_from_jid}.each do |att| instance_variable_set(:"@#{att}", atts.fetch(att)) end @@ -172,10 +168,6 @@ def ttl @expires_at - Time.now.to_f end - def throttle_objects - throttles.map { |name| Throttle.new(name, client) } - end - def reconnect_to_redis @client.redis.client.reconnect end @@ -233,7 +225,7 @@ def to_hash # Move this from it's current queue into another def requeue(queue, opts = {}) note_state_change :requeue do - @client.call('requeue', @client.worker_name, queue, @jid, @klass_name, + @client.call('put', @client.worker_name, queue, @jid, @klass_name, JSON.dump(opts.fetch(:data, @data)), opts.fetch(:delay, 0), 'priority', opts.fetch(:priority, @priority), diff --git a/lib/qless/middleware/requeue_exceptions.rb b/lib/qless/middleware/requeue_exceptions.rb index 98311325..eb1d2651 100644 --- a/lib/qless/middleware/requeue_exceptions.rb +++ b/lib/qless/middleware/requeue_exceptions.rb @@ -17,19 +17,17 @@ module Middleware # to be retried many times, w/o having other transient errors retried so # many times. 
module RequeueExceptions - RequeueableException = Struct.new(:klass, :delay_min, :delay_span, :max_attempts) do + RequeueableException = Struct.new(:klass, :delay_range, :max_attempts) do def self.from_splat_and_options(*klasses, options) - delay_range = options.fetch(:delay_range) - delay_min = Float(delay_range.min) - delay_span = Float(delay_range.max) - Float(delay_range.min) - max_attempts = options.fetch(:max_attempts) klasses.map do |klass| - new(klass, delay_min, delay_span, max_attempts) + new(klass, + options.fetch(:delay_range).to_a, + options.fetch(:max_attempts)) end end def delay - delay_min + Random.rand(delay_span) + delay_range.sample end def raise_if_exhausted_requeues(error, requeues) @@ -44,38 +42,19 @@ def requeue_on(*exceptions, options) end end - DEFAULT_ON_REQUEUE_CALLBACK = lambda { |error, job| } - def use_on_requeue_callback(&block) - @on_requeue_callback = block if block - end - - def on_requeue_callback - @on_requeue_callback ||= DEFAULT_ON_REQUEUE_CALLBACK - end - - def handle_exception(job, error) - config = requeuable_exception_for(error) + def around_perform(job) + super + rescue *requeueable_exceptions.keys => e + config = requeuable_exception_for(e) requeues_by_exception = (job.data['requeues_by_exception'] ||= {}) requeues_by_exception[config.klass.name] ||= 0 config.raise_if_exhausted_requeues( - error, requeues_by_exception[config.klass.name]) + e, requeues_by_exception[config.klass.name]) requeues_by_exception[config.klass.name] += 1 job.requeue(job.queue_name, delay: config.delay, data: job.data) - - on_requeue_callback.call(error, job) - end - - def around_perform(job) - super - rescue *requeueable_exceptions.keys => e - handle_exception(job, e) - end - - def requeueable?(exception) - requeueable_exceptions.member?(exception) end def requeueable_exceptions diff --git a/spec/unit/middleware/requeue_exceptions_spec.rb b/spec/unit/middleware/requeue_exceptions_spec.rb index 95f294ff..ae42b138 100644 --- a/spec/unit/middleware/requeue_exceptions_spec.rb +++ b/spec/unit/middleware/requeue_exceptions_spec.rb @@ -16,265 +16,150 @@ def around_perform(job) end end - let(:container) do - container = container_class.new - container.extend(RequeueExceptions) - container + let(:container) { container_class.new } + let(:job) do + instance_double( + 'Qless::Job', requeue: nil, queue_name: 'my-queue', data: {}) end + let(:delay_range) { (0..30) } + let(:max_attempts) { 20 } - describe ".requeue_on" do - it "does not throw with empty class list" do - container.requeue_on(delay_range: 1..10, - max_attempts: 1) - end + matched_exception_1 = ZeroDivisionError + matched_exception_2 = KeyError + unmatched_exception = RegexpError - it "throws KeyError if no max_attempts" do - expect do - container.requeue_on(delay_range: 1..10) - end.to raise_error(KeyError) - end - - it "throws KeyError if no delay_range" do - expect do - container.requeue_on(max_attempts: 1) - end.to raise_error(KeyError) - end - - it "throws NoMethodError if delay_range does not respond to .min or .max" do - expect do - container.requeue_on(delay_range: 1, max_attempts: 1) - end.to raise_error(NoMethodError) - end - - it "throws ArgumentError if delay_range is not numerical" do - expect do - container.requeue_on(delay_range: "a".."z", max_attempts: 1) - end.to raise_error(ArgumentError) + module MessageSpecificException + def self.===(other) + ArgumentError === other && other.message.include?("foo") end + end - it "throws TypeError if delay_range is empty" do - expect do - 
container.requeue_on(delay_range: 2..1, max_attempts: 1) - end.to raise_error(TypeError) - end + before do + container.extend(RequeueExceptions) + container.requeue_on(matched_exception_1, matched_exception_2, + MessageSpecificException, + delay_range: delay_range, + max_attempts: max_attempts) + end - it "throws TypeError on empty delay_range" do - expect do - container.requeue_on(delay_range: 1..0, max_attempts: 1) - end.to raise_error(TypeError) - end + def perform + container.around_perform(job) + end - it "adds exceptions to requeable collection on success" do - container.requeue_on(ArgumentError, TypeError, delay_range: 1..2, max_attempts: 2) - expect(container.requeueable_exceptions).to include(ArgumentError, TypeError) - end + context 'when no exception is raised' do + before { container.perform = -> { } } - it "updates exceptions on repeated .requeue_on" do - container.requeue_on(ArgumentError, TypeError, delay_range: 1..2, max_attempts: 2) - container.requeue_on(TypeError, KeyError, delay_range: 1..2, max_attempts: 3) - expect(container.requeueable_exceptions).to include(ArgumentError, TypeError, KeyError) - expect(container.requeueable_exceptions[KeyError].max_attempts).to eq(3); + it 'does not requeue the job' do + job.should_not_receive(:requeue) + perform end end - describe ".requeueable?" do - before do - container.requeue_on(KeyError, delay_range: 1..2, max_attempts: 3) - end + context 'when an unmatched exception is raised' do + before { container.perform = -> { raise unmatched_exception } } - it 'returns false if exception is not requeue_on' do - expect(container.requeueable?(TypeError)).to be(false) - end - - it 'returns true when exception requeued on' do - expect(container.requeueable?(KeyError)).to be(true) + it 'allows the error to propagate' do + job.should_not_receive(:requeue) + expect { perform }.to raise_error(unmatched_exception) end end - context "when requeue_on successful" do + shared_context "requeues on matching exception" do |exception, exception_name| + before { container.perform = -> { raise_exception } } - let(:job) do - instance_double( - 'Qless::Job', requeue: nil, queue_name: 'my-queue', data: {}) + it 'requeues the job' do + job.should_receive(:requeue).with('my-queue', anything) + perform end - let(:delay_range) { (0..30) } - let(:max_attempts) { 20 } - matched_exception_1 = ZeroDivisionError - matched_exception_2 = KeyError - unmatched_exception = RegexpError + it 'uses a random delay from the delay_range' do + Kernel.srand(100) + sample = delay_range.to_a.sample - module MessageSpecificException - def self.===(other) - ArgumentError === other && other.message.include?("foo") - end - end + job.should_receive(:requeue).with( + 'my-queue', hash_including(delay: sample)) - before do - ## container.extend(RequeueExceptions) - container.requeue_on(matched_exception_1, matched_exception_2, - MessageSpecificException, - delay_range: delay_range, - max_attempts: max_attempts) + Kernel.srand(100) + perform end - def set_requeue_callback - container.use_on_requeue_callback { |error, job| callback_catcher << [error, job] } - end + it 'tracks the number of requeues for this error' do + expected_first_time = { + 'requeues_by_exception' => { exception_name => 1 } } + job.should_receive(:requeue).with('my-queue', hash_including( + data: expected_first_time + )) + perform - def callback_catcher - @callback_catcher ||= [] - end + job.data.merge!(expected_first_time) - def perform - container.around_perform(job) + job.should_receive(:requeue).with('my-queue', 
hash_including( + data: { 'requeues_by_exception' => { exception_name => 2 } } + )) + perform end - describe '.use_on_requeue_callback' do - it 'uses a default callback if none is given' do - expect(container.on_requeue_callback).to eq( - RequeueExceptions::DEFAULT_ON_REQUEUE_CALLBACK) - end + it 'preserves other requeues_by_exception values' do + job.data['requeues_by_exception'] = { 'SomeKlass' => 3 } - it 'accepts a block to set an after requeue callback' do - container.use_on_requeue_callback { |*| true } - expect(container.on_requeue_callback).not_to eq( - RequeueExceptions::DEFAULT_ON_REQUEUE_CALLBACK) - end + job.should_receive(:requeue).with('my-queue', hash_including( + data: { + 'requeues_by_exception' => { + exception_name => 1, 'SomeKlass' => 3 + } } + )) + perform end - context 'when no exception is raised' do - before { container.perform = -> { } } + it 'preserves other data' do + job.data['foo'] = 3 - it 'does not requeue the job' do - job.should_not_receive(:requeue) - perform - end + job.should_receive(:requeue).with('my-queue', hash_including( + data: { + 'requeues_by_exception' => { exception_name => 1 }, + 'foo' => 3 } + )) + perform end - context 'when an unmatched exception is raised' do - before { container.perform = -> { raise unmatched_exception } } - - it 'allows the error to propagate' do - job.should_not_receive(:requeue) - expect { perform }.to raise_error(unmatched_exception) - end - - context 'when an after requeue callback is set' do - before { set_requeue_callback } - - it 'does not call the callback' do - expect { perform }.to raise_error(unmatched_exception) + it 'allow the error to propogate after max_attempts' do + job.data['requeues_by_exception'] = { + exception_name => max_attempts } + job.should_not_receive(:requeue) - expect(callback_catcher.size).to eq(0) - end - end - end - - shared_context "requeues on matching exception" do |exception, exception_name| - before { container.perform = -> { raise_exception } } - - it 'requeues the job' do - job.should_receive(:requeue).with('my-queue', anything) - perform - end - - it 'uses a random delay from the delay_range' do - job.should_receive(:requeue) do |qname, hash| - expect(qname).to eq('my-queue') - expect(hash[:delay]).to be_between(delay_range.min, delay_range.max) - end - perform - end - - it 'tracks the number of requeues for this error' do - expected_first_time = { - 'requeues_by_exception' => { exception_name => 1 } } - job.should_receive(:requeue).with('my-queue', hash_including( - data: expected_first_time - )) - perform - - job.data.merge!(expected_first_time) - - job.should_receive(:requeue).with('my-queue', hash_including( - data: { 'requeues_by_exception' => { exception_name => 2 } } - )) - perform - end - - it 'preserves other requeues_by_exception values' do - job.data['requeues_by_exception'] = { 'SomeKlass' => 3 } - - job.should_receive(:requeue).with('my-queue', hash_including( - data: { - 'requeues_by_exception' => { - exception_name => 1, 'SomeKlass' => 3 - } } - )) - perform - end - - it 'preserves other data' do - job.data['foo'] = 3 - - job.should_receive(:requeue).with('my-queue', hash_including( - data: { - 'requeues_by_exception' => { exception_name => 1 }, - 'foo' => 3 } - )) - perform - end - - it 'allow the error to propogate after max_attempts' do - job.data['requeues_by_exception'] = { - exception_name => max_attempts } - job.should_not_receive(:requeue) - - expect { perform }.to raise_error(exception) - end - - context 'when an after requeue callback is set' do - before { 
set_requeue_callback } - - it 'calls the callback' do - expect { - perform - }.to change { callback_catcher.size }.from(0).to(1) - end - end + expect { perform }.to raise_error(exception) end + end - context "when a matched exception is raised" do - include_examples "requeues on matching exception", matched_exception_1, matched_exception_1.name do - define_method(:raise_exception) { raise matched_exception_1 } - end + context "when a matched exception is raised" do + include_examples "requeues on matching exception", matched_exception_1, matched_exception_1.name do + define_method(:raise_exception) { raise matched_exception_1 } end + end - context "when another matched exception is raised" do - include_examples "requeues on matching exception", matched_exception_2, matched_exception_2.name do - define_method(:raise_exception) { raise matched_exception_2 } - end + context "when another matched exception is raised" do + include_examples "requeues on matching exception", matched_exception_2, matched_exception_2.name do + define_method(:raise_exception) { raise matched_exception_2 } end + end - context "when a subclass of a matched exception is raised" do - exception = Class.new(matched_exception_1) - include_examples "requeues on matching exception", exception, matched_exception_1.name do - define_method(:raise_exception) { raise exception } - end + context "when a subclass of a matched exception is raised" do + exception = Class.new(matched_exception_1) + include_examples "requeues on matching exception", exception, matched_exception_1.name do + define_method(:raise_exception) { raise exception } end + end - context "when an exception is raised that matches a listed on using `===` but not `is_a?" do - let(:exception_instance) { ArgumentError.new("Bad foo") } + context "when an exception is raised that matches a listed on using `===` but not `is_a?" do + let(:exception_instance) { ArgumentError.new("Bad foo") } - before do - expect(exception_instance).not_to be_a(MessageSpecificException) - expect(MessageSpecificException).to be === exception_instance - end + before do + expect(exception_instance).not_to be_a(MessageSpecificException) + expect(MessageSpecificException).to be === exception_instance + end - include_examples "requeues on matching exception", MessageSpecificException, MessageSpecificException.name do - define_method(:raise_exception) { raise exception_instance } - end + include_examples "requeues on matching exception", MessageSpecificException, MessageSpecificException.name do + define_method(:raise_exception) { raise exception_instance } end end end From 4c959529a9ab7630785c57ed3dd6d088fcb210eb Mon Sep 17 00:00:00 2001 From: Myron Marston Date: Thu, 12 Jun 2014 13:10:30 -0700 Subject: [PATCH 56/83] Use new `requeue` qless-core command. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This ensures that you’ll get an error if the job has already been cancelled, rather than resurrecting it. 
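A minimal usage sketch of the behavior described above. It assumes `client` is a configured Qless::Client, ExampleJob is a placeholder job class, and that the cancelled-job failure surfaces as Qless::LuaScriptError; that error class is an assumption, only the refuse-to-resurrect behavior is stated in the commit message.

require 'qless'

client = Qless::Client.new
queue  = client.queues['example-queue']
jid    = queue.put(ExampleJob, { 'some' => 'data' })
job    = client.jobs[jid]

job.requeue('slow-lane', delay: 30, data: job.data)  # normal requeue still works

job.cancel  # the jid no longer exists server-side

begin
  job.requeue('slow-lane')  # with the new `requeue` command this errors instead of resurrecting
rescue Qless::LuaScriptError => e  # assumed error class, see note above
  warn "requeue refused: #{e.message}"
end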
--- lib/qless/job.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/qless/job.rb b/lib/qless/job.rb index 9d68d0fb..254643d8 100644 --- a/lib/qless/job.rb +++ b/lib/qless/job.rb @@ -225,7 +225,7 @@ def to_hash # Move this from it's current queue into another def requeue(queue, opts = {}) note_state_change :requeue do - @client.call('put', @client.worker_name, queue, @jid, @klass_name, + @client.call('requeue', @client.worker_name, queue, @jid, @klass_name, JSON.dump(opts.fetch(:data, @data)), opts.fetch(:delay, 0), 'priority', opts.fetch(:priority, @priority), From 1a63c47d95eb7fb80528a59486fac1212237b195 Mon Sep 17 00:00:00 2001 From: Patrick Roby Date: Wed, 18 Jun 2014 15:39:26 -0700 Subject: [PATCH 57/83] add after_retry callback handler for RetryExceptions middleware --- lib/qless/middleware/retry_exceptions.rb | 9 ++- spec/unit/middleware/retry_exceptions_spec.rb | 74 ++++++------------- 2 files changed, 31 insertions(+), 52 deletions(-) diff --git a/lib/qless/middleware/retry_exceptions.rb b/lib/qless/middleware/retry_exceptions.rb index d97b69eb..750938b0 100644 --- a/lib/qless/middleware/retry_exceptions.rb +++ b/lib/qless/middleware/retry_exceptions.rb @@ -17,13 +17,16 @@ def around_perform(job) attempt_num = (job.original_retries - job.retries_left) + 1 failure = Qless.failure_formatter.format(job, error) job.retry(backoff_strategy.call(attempt_num, error), *failure) + + after_retry_callbacks.each { |callback| callback.call(error, job) } end def retryable_exception_classes @retryable_exception_classes ||= [] end - def retry_on(*exception_classes) + def retry_on(*exception_classes, &block) + after_retry_callbacks << block if block_given? retryable_exception_classes.push(*exception_classes) end @@ -37,6 +40,10 @@ def backoff_strategy @backoff_strategy ||= NO_BACKOFF_STRATEGY end + def after_retry_callbacks + @after_retry_callbacks ||= [] + end + def exponential(base, options = {}) fuzz_factor = options.fetch(:fuzz_factor, 0) diff --git a/spec/unit/middleware/retry_exceptions_spec.rb b/spec/unit/middleware/retry_exceptions_spec.rb index f548b86a..db5903f0 100644 --- a/spec/unit/middleware/retry_exceptions_spec.rb +++ b/spec/unit/middleware/retry_exceptions_spec.rb @@ -35,23 +35,22 @@ def perform end def add_retry_callback - container.use_on_retry_callback { |error, job| callback_catcher << [error, job] } + callback = ->(error, job) { callback_catcher << [error, job] } + container.after_retry_callbacks << callback end def callback_catcher @callback_catcher ||= [] end - describe '.use_on_retry_callback' do - it 'uses a default callback if none is given' do - expect(container.on_retry_callback).to eq( - RetryExceptions::DEFAULT_ON_RETRY_CALLBACK) - end - + describe '.retry_on' do it 'accepts a block to set an after retry callback' do - container.use_on_retry_callback { |*| true } - expect(container.on_retry_callback).not_to eq( - RetryExceptions::DEFAULT_ON_RETRY_CALLBACK) + container = container_class.new + container.extend(RetryExceptions) + + expect { + container.retry_on(matched_exception) { |*| true } + }.to change {container.after_retry_callbacks.size }.from(0).to(1) end end @@ -160,61 +159,34 @@ def perform_and_track_delays end context 'with an exponential backoff retry strategy' do - it 'generates an exponential delay' do + before do container.instance_eval do use_backoff_strategy exponential(10) end - - delays = perform_and_track_delays - - expect(delays).to eq([10, 100, 1_000, 10_000, 100_000]) - end - - it 'generates an exponential delay using 
explicitly given factor' do - container.instance_eval do - use_backoff_strategy exponential(10, factor: 3) - end - - delays = perform_and_track_delays - - expect(delays).to eq([10, 30, 90, 270, 810]) end - it 'when fuzz_factor given, dissipate delays over range' do - container.instance_eval do - use_backoff_strategy exponential(10, fuzz_factor: 0.3) - end - + it 'uses an exponential delay' do delays = perform_and_track_delays - - [10, 100, 1_000, 10_000, 100_000].zip(delays).each do |unfuzzed, actual| - expect(actual).not_to eq(unfuzzed) - expect(actual).to be_within(30).percent_of(unfuzzed) - end + expect(delays).to eq([10, 100, 1_000, 10_000, 100_000]) end + end - it 'combines factor and fuzz_factor' do + context 'with an exponential backoff retry strategy and fuzz factor' do + before do container.instance_eval do - use_backoff_strategy exponential(100, factor: 2, fuzz_factor: 0.2) - end - - delays = perform_and_track_delays - - [100, 200, 400, 800, 1600].zip(delays).each do |unfuzzed, actual| - expect(actual).not_to eq(unfuzzed) - expect(actual).to be_within(20).percent_of(unfuzzed) + use_backoff_strategy exponential(10, fuzz_factor: 0.5) end end - it 'can be reused by multiple jobs' do - container.instance_eval do - use_backoff_strategy exponential(10, factor: 2) - end - perform_and_track_delays - + it 'adds some randomness to fuzz it' do delays = perform_and_track_delays + expect(delays).not_to eq([10, 100, 1_000, 10_000, 100_000]) - expect(delays).to eq([10, 20, 40, 80, 160]) + expect(delays[0]).to be_within(50).percent_of(10) + expect(delays[1]).to be_within(50).percent_of(100) + expect(delays[2]).to be_within(50).percent_of(1_000) + expect(delays[3]).to be_within(50).percent_of(10_000) + expect(delays[4]).to be_within(50).percent_of(100_000) end end end From b49e454f9f8321cdf1f2c2c6746e36471cf8fdf0 Mon Sep 17 00:00:00 2001 From: Patrick Roby Date: Wed, 18 Jun 2014 16:03:45 -0700 Subject: [PATCH 58/83] add an after_requeue callback to RequeueExceptions --- lib/qless/middleware/requeue_exceptions.rb | 9 +++- .../middleware/requeue_exceptions_spec.rb | 49 +++++++++++++++++-- 2 files changed, 53 insertions(+), 5 deletions(-) diff --git a/lib/qless/middleware/requeue_exceptions.rb b/lib/qless/middleware/requeue_exceptions.rb index eb1d2651..a5fdd7cb 100644 --- a/lib/qless/middleware/requeue_exceptions.rb +++ b/lib/qless/middleware/requeue_exceptions.rb @@ -35,13 +35,18 @@ def raise_if_exhausted_requeues(error, requeues) end end - def requeue_on(*exceptions, options) + def requeue_on(*exceptions, options, &block) + after_requeue_callbacks << block if block_given? 
RequeueableException.from_splat_and_options( *exceptions, options).each do |exc| requeueable_exceptions[exc.klass] = exc end end + def after_requeue_callbacks + @after_requeue_callbacks ||= [] + end + def around_perform(job) super rescue *requeueable_exceptions.keys => e @@ -55,6 +60,8 @@ def around_perform(job) requeues_by_exception[config.klass.name] += 1 job.requeue(job.queue_name, delay: config.delay, data: job.data) + + after_requeue_callbacks.each { |callback| callback.call(e, job) } end def requeueable_exceptions diff --git a/spec/unit/middleware/requeue_exceptions_spec.rb b/spec/unit/middleware/requeue_exceptions_spec.rb index ae42b138..a54b0a8b 100644 --- a/spec/unit/middleware/requeue_exceptions_spec.rb +++ b/spec/unit/middleware/requeue_exceptions_spec.rb @@ -28,6 +28,11 @@ def around_perform(job) matched_exception_2 = KeyError unmatched_exception = RegexpError + let(:requeue_on_args) do + [matched_exception_1, matched_exception_2, MessageSpecificException, + delay_range: delay_range, max_attempts: max_attempts] + end + module MessageSpecificException def self.===(other) ArgumentError === other && other.message.include?("foo") @@ -36,16 +41,32 @@ def self.===(other) before do container.extend(RequeueExceptions) - container.requeue_on(matched_exception_1, matched_exception_2, - MessageSpecificException, - delay_range: delay_range, - max_attempts: max_attempts) + container.requeue_on(*requeue_on_args) + end + + def add_requeue_callback + callback = ->(error, job) { callback_catcher << [error, job] } + container.after_requeue_callbacks << callback + end + + def callback_catcher + @callback_catcher ||= [] end def perform container.around_perform(job) end + describe '.requeue_on' do + it 'accepts a block to set an after requeue callback' do + container.extend(RequeueExceptions) + + expect { + container.requeue_on(*requeue_on_args) { |*| true } + }.to change {container.after_requeue_callbacks.size }.from(0).to(1) + end + end + context 'when no exception is raised' do before { container.perform = -> { } } @@ -62,6 +83,16 @@ def perform job.should_not_receive(:requeue) expect { perform }.to raise_error(unmatched_exception) end + + context 'when an after requeue callback is set' do + before { add_requeue_callback } + + it 'does not call the callback' do + expect { perform }.to raise_error(unmatched_exception) + + expect(callback_catcher.size).to eq(0) + end + end end shared_context "requeues on matching exception" do |exception, exception_name| @@ -129,6 +160,16 @@ def perform expect { perform }.to raise_error(exception) end + + context 'when an after requeue callback is set' do + before { add_requeue_callback } + + it 'calls the callback' do + expect { + perform + }.to change { callback_catcher.size }.from(0).to(1) + end + end end context "when a matched exception is raised" do From 0f7370ba11b2a07f56ef462b7919d4fae8af2508 Mon Sep 17 00:00:00 2001 From: Patrick Roby Date: Wed, 18 Jun 2014 16:18:09 -0700 Subject: [PATCH 59/83] check block.nil? instead of block_given? 
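A short sketch of the block form added by the two patches above. FetchFeedJob and ErrorNotifier are placeholders, the class is assumed to already run qless's middleware chain, and the backoff helper is the one introduced a few patches earlier.

require 'qless/middleware/retry_exceptions'

class FetchFeedJob
  extend Qless::Middleware::RetryExceptions

  # retry on transient network errors and report each retry via the block callback
  retry_on(Timeout::Error, Errno::ECONNRESET) do |error, job|
    ErrorNotifier.notify(error, jid: job.jid)  # hypothetical reporting helper
  end

  use_backoff_strategy exponential(10, fuzz_factor: 0.3)

  def self.perform(job)
    # actual work goes here
  end
end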
--- lib/qless/middleware/requeue_exceptions.rb | 2 +- lib/qless/middleware/retry_exceptions.rb | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/qless/middleware/requeue_exceptions.rb b/lib/qless/middleware/requeue_exceptions.rb index a5fdd7cb..f3a3f3e8 100644 --- a/lib/qless/middleware/requeue_exceptions.rb +++ b/lib/qless/middleware/requeue_exceptions.rb @@ -36,7 +36,7 @@ def raise_if_exhausted_requeues(error, requeues) end def requeue_on(*exceptions, options, &block) - after_requeue_callbacks << block if block_given? + after_requeue_callbacks << block unless block.nil? RequeueableException.from_splat_and_options( *exceptions, options).each do |exc| requeueable_exceptions[exc.klass] = exc diff --git a/lib/qless/middleware/retry_exceptions.rb b/lib/qless/middleware/retry_exceptions.rb index 750938b0..33127031 100644 --- a/lib/qless/middleware/retry_exceptions.rb +++ b/lib/qless/middleware/retry_exceptions.rb @@ -26,7 +26,7 @@ def retryable_exception_classes end def retry_on(*exception_classes, &block) - after_retry_callbacks << block if block_given? + after_retry_callbacks << block unless block.nil? retryable_exception_classes.push(*exception_classes) end From e0d9cdce69d1dd8bba8539c3aa329263923f9ca7 Mon Sep 17 00:00:00 2001 From: Patrick Roby Date: Wed, 18 Jun 2014 17:13:55 -0700 Subject: [PATCH 60/83] only allow one callback and provide explicit method for setting callback --- lib/qless/middleware/requeue_exceptions.rb | 14 +++++--- lib/qless/middleware/retry_exceptions.rb | 14 +++++--- .../middleware/requeue_exceptions_spec.rb | 35 ++++++++++++------- spec/unit/middleware/retry_exceptions_spec.rb | 28 +++++++++------ 4 files changed, 58 insertions(+), 33 deletions(-) diff --git a/lib/qless/middleware/requeue_exceptions.rb b/lib/qless/middleware/requeue_exceptions.rb index f3a3f3e8..a148f27a 100644 --- a/lib/qless/middleware/requeue_exceptions.rb +++ b/lib/qless/middleware/requeue_exceptions.rb @@ -35,16 +35,20 @@ def raise_if_exhausted_requeues(error, requeues) end end - def requeue_on(*exceptions, options, &block) - after_requeue_callbacks << block unless block.nil? 
+ def requeue_on(*exceptions, options) RequeueableException.from_splat_and_options( *exceptions, options).each do |exc| requeueable_exceptions[exc.klass] = exc end end - def after_requeue_callbacks - @after_requeue_callbacks ||= [] + DEFAULT_ON_REQUEUE_CALLBACK = lambda { |error, job| } + def use_on_requeue_callback(&block) + @on_requeue_callback = block if block + end + + def on_requeue_callback + @on_requeue_callback ||= DEFAULT_ON_REQUEUE_CALLBACK end def around_perform(job) @@ -61,7 +65,7 @@ def around_perform(job) requeues_by_exception[config.klass.name] += 1 job.requeue(job.queue_name, delay: config.delay, data: job.data) - after_requeue_callbacks.each { |callback| callback.call(e, job) } + on_requeue_callback.call(e, job) end def requeueable_exceptions diff --git a/lib/qless/middleware/retry_exceptions.rb b/lib/qless/middleware/retry_exceptions.rb index 33127031..baa7c68e 100644 --- a/lib/qless/middleware/retry_exceptions.rb +++ b/lib/qless/middleware/retry_exceptions.rb @@ -18,15 +18,14 @@ def around_perform(job) failure = Qless.failure_formatter.format(job, error) job.retry(backoff_strategy.call(attempt_num, error), *failure) - after_retry_callbacks.each { |callback| callback.call(error, job) } + on_retry_callback.call(error, job) end def retryable_exception_classes @retryable_exception_classes ||= [] end - def retry_on(*exception_classes, &block) - after_retry_callbacks << block unless block.nil? + def retry_on(*exception_classes) retryable_exception_classes.push(*exception_classes) end @@ -40,8 +39,13 @@ def backoff_strategy @backoff_strategy ||= NO_BACKOFF_STRATEGY end - def after_retry_callbacks - @after_retry_callbacks ||= [] + DEFAULT_ON_RETRY_CALLBACK = lambda { |error, job| } + def use_on_retry_callback(&block) + @on_retry_callback = block if block + end + + def on_retry_callback + @on_retry_callback ||= DEFAULT_ON_RETRY_CALLBACK end def exponential(base, options = {}) diff --git a/spec/unit/middleware/requeue_exceptions_spec.rb b/spec/unit/middleware/requeue_exceptions_spec.rb index a54b0a8b..592dbc62 100644 --- a/spec/unit/middleware/requeue_exceptions_spec.rb +++ b/spec/unit/middleware/requeue_exceptions_spec.rb @@ -23,6 +23,7 @@ def around_perform(job) end let(:delay_range) { (0..30) } let(:max_attempts) { 20 } + let(:add_default_requeue) { true } matched_exception_1 = ZeroDivisionError matched_exception_2 = KeyError @@ -30,7 +31,7 @@ def around_perform(job) let(:requeue_on_args) do [matched_exception_1, matched_exception_2, MessageSpecificException, - delay_range: delay_range, max_attempts: max_attempts] + {delay_range: delay_range, max_attempts: max_attempts}] end module MessageSpecificException @@ -41,12 +42,11 @@ def self.===(other) before do container.extend(RequeueExceptions) - container.requeue_on(*requeue_on_args) + container.requeue_on(*requeue_on_args) if add_default_requeue end - def add_requeue_callback - callback = ->(error, job) { callback_catcher << [error, job] } - container.after_requeue_callbacks << callback + def set_requeue_callback + container.use_on_requeue_callback { |error, job| callback_catcher << [error, job] } end def callback_catcher @@ -57,13 +57,22 @@ def perform container.around_perform(job) end - describe '.requeue_on' do - it 'accepts a block to set an after requeue callback' do - container.extend(RequeueExceptions) + describe '.use_on_requeue_callback' do + let(:add_default_requeue) { false } + + before { container.extend(RequeueExceptions) } - expect { - container.requeue_on(*requeue_on_args) { |*| true } - }.to change 
{container.after_requeue_callbacks.size }.from(0).to(1) + it 'uses a default callback if none is given' do + container.requeue_on(*requeue_on_args) + expect(container.on_requeue_callback).to eq( + RequeueExceptions::DEFAULT_ON_REQUEUE_CALLBACK) + end + + it 'accepts a block to set an after requeue callback' do + container.use_on_requeue_callback { |*| true } + container.requeue_on(*requeue_on_args) + expect(container.on_requeue_callback).not_to eq( + RequeueExceptions::DEFAULT_ON_REQUEUE_CALLBACK) end end @@ -85,7 +94,7 @@ def perform end context 'when an after requeue callback is set' do - before { add_requeue_callback } + before { set_requeue_callback } it 'does not call the callback' do expect { perform }.to raise_error(unmatched_exception) @@ -162,7 +171,7 @@ def perform end context 'when an after requeue callback is set' do - before { add_requeue_callback } + before { set_requeue_callback } it 'calls the callback' do expect { diff --git a/spec/unit/middleware/retry_exceptions_spec.rb b/spec/unit/middleware/retry_exceptions_spec.rb index db5903f0..4538db61 100644 --- a/spec/unit/middleware/retry_exceptions_spec.rb +++ b/spec/unit/middleware/retry_exceptions_spec.rb @@ -24,10 +24,11 @@ def around_perform(job) end let(:matched_exception) { ZeroDivisionError } let(:unmatched_exception) { RegexpError } + let(:add_default_retry) { true } before do container.extend(RetryExceptions) - container.retry_on matched_exception + container.retry_on matched_exception if add_default_retry end def perform @@ -35,22 +36,29 @@ def perform end def add_retry_callback - callback = ->(error, job) { callback_catcher << [error, job] } - container.after_retry_callbacks << callback + container.use_on_retry_callback { |error, job| callback_catcher << [error, job] } end def callback_catcher @callback_catcher ||= [] end - describe '.retry_on' do - it 'accepts a block to set an after retry callback' do - container = container_class.new - container.extend(RetryExceptions) + describe '.use_on_retry_callback' do + let(:add_default_retry) { false } + + before { container.extend(RetryExceptions) } - expect { - container.retry_on(matched_exception) { |*| true } - }.to change {container.after_retry_callbacks.size }.from(0).to(1) + it 'uses a default callback if none is given' do + container.retry_on(matched_exception) + expect(container.on_retry_callback).to eq( + RetryExceptions::DEFAULT_ON_RETRY_CALLBACK) + end + + it 'accepts a block to set an after retry callback' do + container.use_on_retry_callback { |*| true } + container.retry_on(matched_exception) + expect(container.on_retry_callback).not_to eq( + RetryExceptions::DEFAULT_ON_RETRY_CALLBACK) end end From 094bbc17d0c16c4684f9290061950153864f1543 Mon Sep 17 00:00:00 2001 From: Patrick Roby Date: Wed, 18 Jun 2014 17:16:31 -0700 Subject: [PATCH 61/83] get rid of unnecessary test config code --- spec/unit/middleware/requeue_exceptions_spec.rb | 17 ++++------------- spec/unit/middleware/retry_exceptions_spec.rb | 9 +-------- 2 files changed, 5 insertions(+), 21 deletions(-) diff --git a/spec/unit/middleware/requeue_exceptions_spec.rb b/spec/unit/middleware/requeue_exceptions_spec.rb index 592dbc62..ee4d754a 100644 --- a/spec/unit/middleware/requeue_exceptions_spec.rb +++ b/spec/unit/middleware/requeue_exceptions_spec.rb @@ -23,17 +23,11 @@ def around_perform(job) end let(:delay_range) { (0..30) } let(:max_attempts) { 20 } - let(:add_default_requeue) { true } matched_exception_1 = ZeroDivisionError matched_exception_2 = KeyError unmatched_exception = RegexpError - 
let(:requeue_on_args) do - [matched_exception_1, matched_exception_2, MessageSpecificException, - {delay_range: delay_range, max_attempts: max_attempts}] - end - module MessageSpecificException def self.===(other) ArgumentError === other && other.message.include?("foo") @@ -42,7 +36,10 @@ def self.===(other) before do container.extend(RequeueExceptions) - container.requeue_on(*requeue_on_args) if add_default_requeue + container.requeue_on(matched_exception_1, matched_exception_2, + MessageSpecificException, + delay_range: delay_range, + max_attempts: max_attempts) end def set_requeue_callback @@ -58,19 +55,13 @@ def perform end describe '.use_on_requeue_callback' do - let(:add_default_requeue) { false } - - before { container.extend(RequeueExceptions) } - it 'uses a default callback if none is given' do - container.requeue_on(*requeue_on_args) expect(container.on_requeue_callback).to eq( RequeueExceptions::DEFAULT_ON_REQUEUE_CALLBACK) end it 'accepts a block to set an after requeue callback' do container.use_on_requeue_callback { |*| true } - container.requeue_on(*requeue_on_args) expect(container.on_requeue_callback).not_to eq( RequeueExceptions::DEFAULT_ON_REQUEUE_CALLBACK) end diff --git a/spec/unit/middleware/retry_exceptions_spec.rb b/spec/unit/middleware/retry_exceptions_spec.rb index 4538db61..459d0f06 100644 --- a/spec/unit/middleware/retry_exceptions_spec.rb +++ b/spec/unit/middleware/retry_exceptions_spec.rb @@ -24,11 +24,10 @@ def around_perform(job) end let(:matched_exception) { ZeroDivisionError } let(:unmatched_exception) { RegexpError } - let(:add_default_retry) { true } before do container.extend(RetryExceptions) - container.retry_on matched_exception if add_default_retry + container.retry_on matched_exception end def perform @@ -44,19 +43,13 @@ def callback_catcher end describe '.use_on_retry_callback' do - let(:add_default_retry) { false } - - before { container.extend(RetryExceptions) } - it 'uses a default callback if none is given' do - container.retry_on(matched_exception) expect(container.on_retry_callback).to eq( RetryExceptions::DEFAULT_ON_RETRY_CALLBACK) end it 'accepts a block to set an after retry callback' do container.use_on_retry_callback { |*| true } - container.retry_on(matched_exception) expect(container.on_retry_callback).not_to eq( RetryExceptions::DEFAULT_ON_RETRY_CALLBACK) end From eb0b5a7a0c6c024e4f5a86fbc1638c1cb4be2f4a Mon Sep 17 00:00:00 2001 From: Myron Marston Date: Wed, 30 Jul 2014 11:28:54 -0700 Subject: [PATCH 62/83] Fix typo. 
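For reference, a sketch of the single-callback form that the preceding patches settle on: the callback is set through an explicit method, and a no-op default is used when none is configured. ImportJob and MetricsReporter are placeholders; the class is assumed to already run qless's middleware chain.

require 'qless/middleware/requeue_exceptions'

class ImportJob
  extend Qless::Middleware::RequeueExceptions

  requeue_on(Redis::TimeoutError, delay_range: 10..60, max_attempts: 5)

  # exactly one callback; without this call DEFAULT_ON_REQUEUE_CALLBACK (a no-op) runs
  use_on_requeue_callback do |error, job|
    MetricsReporter.increment('import.requeued', error: error.class.name, jid: job.jid)
  end

  def self.perform(job)
    # actual work goes here
  end
end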
--- lib/qless/job.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/qless/job.rb b/lib/qless/job.rb index 254643d8..cb8b2eb6 100644 --- a/lib/qless/job.rb +++ b/lib/qless/job.rb @@ -75,7 +75,7 @@ def perform elsif !klass.respond_to?(:perform) # If the klass doesn't have a :perform method, we should raise an error fail("#{queue_name}-method-missing", - "#{klass_name} has no peform method") + "#{klass_name} has no perform method") else klass.perform(self) end From 18ad696428211478a0fc6e99009cab28fac97213 Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Mon, 4 May 2015 11:50:54 -0400 Subject: [PATCH 63/83] fix requeue from losing throttles --- lib/qless/job.rb | 23 ++++++++++++++++------- lib/qless/queue.rb | 12 +----------- lib/qless/server.rb | 2 +- spec/unit/job_spec.rb | 22 +++++++++++++++++++++- 4 files changed, 39 insertions(+), 20 deletions(-) diff --git a/lib/qless/job.rb b/lib/qless/job.rb index cb8b2eb6..de4e8b19 100644 --- a/lib/qless/job.rb +++ b/lib/qless/job.rb @@ -106,6 +106,18 @@ def self.build(client, klass, attributes = {}) new(client, attributes) end + # Converts a hash of job options (as returned by job.to_hash) into the array + # format the qless api expects. + def self.build_opts_array(opts) + result = [] + result << opts.fetch(:delay, 0) + result.concat(['priority', opts.fetch(:priority, 0)]) + result.concat(['tags', JSON.generate(opts.fetch(:tags, []))]) + result.concat(['retries', opts.fetch(:retries, 5)]) + result.concat(['depends', JSON.generate(opts.fetch(:depends, []))]) + result.concat(['throttles', JSON.generate(opts.fetch(:throttles, []))]) + end + def self.middlewares_on(job_klass) singleton_klass = job_klass.singleton_class singleton_klass.ancestors.select do |ancestor| @@ -218,7 +230,8 @@ def to_hash retries_left: retries_left, data: data, priority: priority, - tags: tags + tags: tags, + throttles: throttles, } end @@ -226,12 +239,8 @@ def to_hash def requeue(queue, opts = {}) note_state_change :requeue do @client.call('requeue', @client.worker_name, queue, @jid, @klass_name, - JSON.dump(opts.fetch(:data, @data)), - opts.fetch(:delay, 0), - 'priority', opts.fetch(:priority, @priority), - 'tags', JSON.dump(opts.fetch(:tags, @tags)), - 'retries', opts.fetch(:retries, @original_retries), - 'depends', JSON.dump(opts.fetch(:depends, @dependencies)) + JSON.generate(opts.fetch(:data, @data)), + *self.class.build_opts_array(opts) ) end end diff --git a/lib/qless/queue.rb b/lib/qless/queue.rb index f8102a8b..a6accc25 100644 --- a/lib/qless/queue.rb +++ b/lib/qless/queue.rb @@ -109,17 +109,7 @@ def put(klass, data, opts = {}) (opts[:jid] || Qless.generate_jid), klass.is_a?(String) ? 
klass : klass.name, JSON.generate(data), - opts.fetch(:delay, 0), - 'priority', - opts.fetch(:priority, 0), - 'tags', - JSON.generate(opts.fetch(:tags, [])), - 'retries', - opts.fetch(:retries, 5), - 'depends', - JSON.generate(opts.fetch(:depends, [])), - 'throttles', - JSON.generate(opts.fetch(:throttles, [])), + *Job.build_opts_array(opts), ) end diff --git a/lib/qless/server.rb b/lib/qless/server.rb index b5f9944d..e069e493 100755 --- a/lib/qless/server.rb +++ b/lib/qless/server.rb @@ -215,7 +215,7 @@ def strftime(t) throttle.maximum = data['maximum'] end end - + put '/throttle' do # Expects a JSON object: {'id': id, 'expiration': expiration} data = JSON.parse(request.body.read) diff --git a/spec/unit/job_spec.rb b/spec/unit/job_spec.rb index b3d2da22..16a07e36 100644 --- a/spec/unit/job_spec.rb +++ b/spec/unit/job_spec.rb @@ -37,6 +37,16 @@ def around_perform(job) end end + describe '.build_opts_array' do + it 'should return a correctly build array' do + # [delay, priority, priority_value, tags, tags_value, retries, retries_value, depends, depends_value, + # throttles, throttles_value] + expected = [0, "priority", 0, "tags", "[]", "retries", 5, "depends", "[]", "throttles", "[]"] + job = Job.build(client, JobClass) + expect(Job.build_opts_array(job.to_hash)).to eq(expected) + end + end + describe '#klass' do it 'returns the class constant' do job = Job.build(client, JobClass, data: {}) @@ -167,7 +177,7 @@ class MyCustomError < StandardError; end job.send(meth, *args) end.to raise_error(MyCustomError) - job.state_changed?.should be_false + job.state_changed?.should be false end it 'triggers before and after callbacks' do @@ -203,6 +213,16 @@ class MyCustomError < StandardError; end spawned_from_jid: "foo" ) end + + it 'returns the throttles of the job' do + job = Job.build(client, JobClass, 'throttles' => ['my-throttle']) + + expect(job.to_hash).to include( + klass_name: "Qless::JobClass", + state: "running", + throttles: ['my-throttle'] + ) + end end describe '#inspect' do From 0fe9ee661e6dce0b13df829b4d70e7c72810b1a4 Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Wed, 6 May 2015 13:22:15 -0400 Subject: [PATCH 64/83] fix reeqeueue --- lib/qless/job.rb | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/lib/qless/job.rb b/lib/qless/job.rb index de4e8b19..8919dccb 100644 --- a/lib/qless/job.rb +++ b/lib/qless/job.rb @@ -44,7 +44,8 @@ class Job < BaseJob attr_reader :klass_name, :tracked, :dependencies, :dependents attr_reader :original_retries, :retries_left, :raw_queue_history attr_reader :state_changed - attr_accessor :data, :priority, :tags + attr_accessor :data, :priority, :tags, :throttles + alias_method(:state_changed?, :state_changed) MiddlewareMisconfiguredError = Class.new(StandardError) @@ -63,6 +64,9 @@ def perform return fail("#{queue_name}-NameError", "Cannot find #{klass_name}") end + # log a real process executing job -- before we start processing + log("started by pid:#{Process.pid}") + middlewares = Job.middlewares_on(klass) if middlewares.last == SupportsMiddleware @@ -99,7 +103,8 @@ def self.build(client, klass, attributes = {}) 'failure' => {}, 'history' => [], 'dependencies' => [], - 'dependents' => [] + 'dependents' => [], + 'throttles' => [], } attributes = defaults.merge(Qless.stringify_hash_keys(attributes)) attributes['data'] = JSON.dump(attributes['data']) @@ -128,7 +133,7 @@ def self.middlewares_on(job_klass) def initialize(client, atts) super(client, atts.fetch('jid')) %w{jid data priority tags state tracked - failure 
dependencies dependents spawned_from_jid}.each do |att| + failure dependencies dependents throttles spawned_from_jid}.each do |att| instance_variable_set(:"@#{att}", atts.fetch(att)) end @@ -180,6 +185,10 @@ def ttl @expires_at - Time.now.to_f end + def throttle_objects + throttles.map { |name| Throttle.new(name, client) } + end + def reconnect_to_redis @client.redis.client.reconnect end From 484c387d69f8ff349bed62ca698d2fedc9dbe9a7 Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Wed, 6 May 2015 13:28:16 -0400 Subject: [PATCH 65/83] undo bad merge --- lib/qless/lua/qless.lua | 12 +- lib/qless/middleware/requeue_exceptions.rb | 32 +- .../middleware/requeue_exceptions_spec.rb | 336 +++++++++++------- 3 files changed, 232 insertions(+), 148 deletions(-) diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua index 123905f1..d17978c2 100644 --- a/lib/qless/lua/qless.lua +++ b/lib/qless/lua/qless.lua @@ -1,4 +1,4 @@ --- Current SHA: 6451b7cbecbae484d32686e64e1d02378ad383f7 +-- Current SHA: 5dbc192de654731c02f5e3ecb1ff00b00852121f -- This is a generated file local Qless = { ns = 'ql:' @@ -419,11 +419,11 @@ function QlessJob:data(...) end end -function QlessJob:complete(now, worker, queue, data, ...) +function QlessJob:complete(now, worker, queue, raw_data, ...) assert(worker, 'Complete(): Arg "worker" missing') assert(queue , 'Complete(): Arg "queue" missing') - data = assert(cjson.decode(data), - 'Complete(): Arg "data" missing or not JSON: ' .. tostring(data)) + local data = assert(cjson.decode(raw_data), + 'Complete(): Arg "data" missing or not JSON: ' .. tostring(raw_data)) local options = {} for i = 1, #arg, 2 do options[arg[i]] = arg[i + 1] end @@ -461,8 +461,8 @@ function QlessJob:complete(now, worker, queue, data, ...) self:history(now, 'done') - if data then - redis.call('hset', QlessJob.ns .. self.jid, 'data', cjson.encode(data)) + if raw_data then + redis.call('hset', QlessJob.ns .. self.jid, 'data', raw_data) end local queue_obj = Qless.queue(queue) diff --git a/lib/qless/middleware/requeue_exceptions.rb b/lib/qless/middleware/requeue_exceptions.rb index a148f27a..98311325 100644 --- a/lib/qless/middleware/requeue_exceptions.rb +++ b/lib/qless/middleware/requeue_exceptions.rb @@ -17,17 +17,19 @@ module Middleware # to be retried many times, w/o having other transient errors retried so # many times. 
module RequeueExceptions - RequeueableException = Struct.new(:klass, :delay_range, :max_attempts) do + RequeueableException = Struct.new(:klass, :delay_min, :delay_span, :max_attempts) do def self.from_splat_and_options(*klasses, options) + delay_range = options.fetch(:delay_range) + delay_min = Float(delay_range.min) + delay_span = Float(delay_range.max) - Float(delay_range.min) + max_attempts = options.fetch(:max_attempts) klasses.map do |klass| - new(klass, - options.fetch(:delay_range).to_a, - options.fetch(:max_attempts)) + new(klass, delay_min, delay_span, max_attempts) end end def delay - delay_range.sample + delay_min + Random.rand(delay_span) end def raise_if_exhausted_requeues(error, requeues) @@ -51,21 +53,29 @@ def on_requeue_callback @on_requeue_callback ||= DEFAULT_ON_REQUEUE_CALLBACK end - def around_perform(job) - super - rescue *requeueable_exceptions.keys => e - config = requeuable_exception_for(e) + def handle_exception(job, error) + config = requeuable_exception_for(error) requeues_by_exception = (job.data['requeues_by_exception'] ||= {}) requeues_by_exception[config.klass.name] ||= 0 config.raise_if_exhausted_requeues( - e, requeues_by_exception[config.klass.name]) + error, requeues_by_exception[config.klass.name]) requeues_by_exception[config.klass.name] += 1 job.requeue(job.queue_name, delay: config.delay, data: job.data) - on_requeue_callback.call(e, job) + on_requeue_callback.call(error, job) + end + + def around_perform(job) + super + rescue *requeueable_exceptions.keys => e + handle_exception(job, e) + end + + def requeueable?(exception) + requeueable_exceptions.member?(exception) end def requeueable_exceptions diff --git a/spec/unit/middleware/requeue_exceptions_spec.rb b/spec/unit/middleware/requeue_exceptions_spec.rb index ee4d754a..95f294ff 100644 --- a/spec/unit/middleware/requeue_exceptions_spec.rb +++ b/spec/unit/middleware/requeue_exceptions_spec.rb @@ -16,191 +16,265 @@ def around_perform(job) end end - let(:container) { container_class.new } - let(:job) do - instance_double( - 'Qless::Job', requeue: nil, queue_name: 'my-queue', data: {}) + let(:container) do + container = container_class.new + container.extend(RequeueExceptions) + container end - let(:delay_range) { (0..30) } - let(:max_attempts) { 20 } - - matched_exception_1 = ZeroDivisionError - matched_exception_2 = KeyError - unmatched_exception = RegexpError - module MessageSpecificException - def self.===(other) - ArgumentError === other && other.message.include?("foo") + describe ".requeue_on" do + it "does not throw with empty class list" do + container.requeue_on(delay_range: 1..10, + max_attempts: 1) end - end - before do - container.extend(RequeueExceptions) - container.requeue_on(matched_exception_1, matched_exception_2, - MessageSpecificException, - delay_range: delay_range, - max_attempts: max_attempts) - end + it "throws KeyError if no max_attempts" do + expect do + container.requeue_on(delay_range: 1..10) + end.to raise_error(KeyError) + end - def set_requeue_callback - container.use_on_requeue_callback { |error, job| callback_catcher << [error, job] } - end + it "throws KeyError if no delay_range" do + expect do + container.requeue_on(max_attempts: 1) + end.to raise_error(KeyError) + end - def callback_catcher - @callback_catcher ||= [] - end + it "throws NoMethodError if delay_range does not respond to .min or .max" do + expect do + container.requeue_on(delay_range: 1, max_attempts: 1) + end.to raise_error(NoMethodError) + end - def perform - container.around_perform(job) - end + it 
"throws ArgumentError if delay_range is not numerical" do + expect do + container.requeue_on(delay_range: "a".."z", max_attempts: 1) + end.to raise_error(ArgumentError) + end - describe '.use_on_requeue_callback' do - it 'uses a default callback if none is given' do - expect(container.on_requeue_callback).to eq( - RequeueExceptions::DEFAULT_ON_REQUEUE_CALLBACK) + it "throws TypeError if delay_range is empty" do + expect do + container.requeue_on(delay_range: 2..1, max_attempts: 1) + end.to raise_error(TypeError) end - it 'accepts a block to set an after requeue callback' do - container.use_on_requeue_callback { |*| true } - expect(container.on_requeue_callback).not_to eq( - RequeueExceptions::DEFAULT_ON_REQUEUE_CALLBACK) + it "throws TypeError on empty delay_range" do + expect do + container.requeue_on(delay_range: 1..0, max_attempts: 1) + end.to raise_error(TypeError) end - end - context 'when no exception is raised' do - before { container.perform = -> { } } + it "adds exceptions to requeable collection on success" do + container.requeue_on(ArgumentError, TypeError, delay_range: 1..2, max_attempts: 2) + expect(container.requeueable_exceptions).to include(ArgumentError, TypeError) + end - it 'does not requeue the job' do - job.should_not_receive(:requeue) - perform + it "updates exceptions on repeated .requeue_on" do + container.requeue_on(ArgumentError, TypeError, delay_range: 1..2, max_attempts: 2) + container.requeue_on(TypeError, KeyError, delay_range: 1..2, max_attempts: 3) + expect(container.requeueable_exceptions).to include(ArgumentError, TypeError, KeyError) + expect(container.requeueable_exceptions[KeyError].max_attempts).to eq(3); end end - context 'when an unmatched exception is raised' do - before { container.perform = -> { raise unmatched_exception } } - - it 'allows the error to propagate' do - job.should_not_receive(:requeue) - expect { perform }.to raise_error(unmatched_exception) + describe ".requeueable?" 
do + before do + container.requeue_on(KeyError, delay_range: 1..2, max_attempts: 3) end - context 'when an after requeue callback is set' do - before { set_requeue_callback } - - it 'does not call the callback' do - expect { perform }.to raise_error(unmatched_exception) + it 'returns false if exception is not requeue_on' do + expect(container.requeueable?(TypeError)).to be(false) + end - expect(callback_catcher.size).to eq(0) - end + it 'returns true when exception requeued on' do + expect(container.requeueable?(KeyError)).to be(true) end end - shared_context "requeues on matching exception" do |exception, exception_name| - before { container.perform = -> { raise_exception } } + context "when requeue_on successful" do - it 'requeues the job' do - job.should_receive(:requeue).with('my-queue', anything) - perform + let(:job) do + instance_double( + 'Qless::Job', requeue: nil, queue_name: 'my-queue', data: {}) end + let(:delay_range) { (0..30) } + let(:max_attempts) { 20 } - it 'uses a random delay from the delay_range' do - Kernel.srand(100) - sample = delay_range.to_a.sample + matched_exception_1 = ZeroDivisionError + matched_exception_2 = KeyError + unmatched_exception = RegexpError - job.should_receive(:requeue).with( - 'my-queue', hash_including(delay: sample)) + module MessageSpecificException + def self.===(other) + ArgumentError === other && other.message.include?("foo") + end + end - Kernel.srand(100) - perform + before do + ## container.extend(RequeueExceptions) + container.requeue_on(matched_exception_1, matched_exception_2, + MessageSpecificException, + delay_range: delay_range, + max_attempts: max_attempts) end - it 'tracks the number of requeues for this error' do - expected_first_time = { - 'requeues_by_exception' => { exception_name => 1 } } - job.should_receive(:requeue).with('my-queue', hash_including( - data: expected_first_time - )) - perform + def set_requeue_callback + container.use_on_requeue_callback { |error, job| callback_catcher << [error, job] } + end - job.data.merge!(expected_first_time) + def callback_catcher + @callback_catcher ||= [] + end - job.should_receive(:requeue).with('my-queue', hash_including( - data: { 'requeues_by_exception' => { exception_name => 2 } } - )) - perform + def perform + container.around_perform(job) end - it 'preserves other requeues_by_exception values' do - job.data['requeues_by_exception'] = { 'SomeKlass' => 3 } + describe '.use_on_requeue_callback' do + it 'uses a default callback if none is given' do + expect(container.on_requeue_callback).to eq( + RequeueExceptions::DEFAULT_ON_REQUEUE_CALLBACK) + end - job.should_receive(:requeue).with('my-queue', hash_including( - data: { - 'requeues_by_exception' => { - exception_name => 1, 'SomeKlass' => 3 - } } - )) - perform + it 'accepts a block to set an after requeue callback' do + container.use_on_requeue_callback { |*| true } + expect(container.on_requeue_callback).not_to eq( + RequeueExceptions::DEFAULT_ON_REQUEUE_CALLBACK) + end end - it 'preserves other data' do - job.data['foo'] = 3 + context 'when no exception is raised' do + before { container.perform = -> { } } - job.should_receive(:requeue).with('my-queue', hash_including( - data: { - 'requeues_by_exception' => { exception_name => 1 }, - 'foo' => 3 } - )) - perform + it 'does not requeue the job' do + job.should_not_receive(:requeue) + perform + end end - it 'allow the error to propogate after max_attempts' do - job.data['requeues_by_exception'] = { - exception_name => max_attempts } - job.should_not_receive(:requeue) + context 
'when an unmatched exception is raised' do + before { container.perform = -> { raise unmatched_exception } } + + it 'allows the error to propagate' do + job.should_not_receive(:requeue) + expect { perform }.to raise_error(unmatched_exception) + end + + context 'when an after requeue callback is set' do + before { set_requeue_callback } - expect { perform }.to raise_error(exception) + it 'does not call the callback' do + expect { perform }.to raise_error(unmatched_exception) + + expect(callback_catcher.size).to eq(0) + end + end end - context 'when an after requeue callback is set' do - before { set_requeue_callback } + shared_context "requeues on matching exception" do |exception, exception_name| + before { container.perform = -> { raise_exception } } + + it 'requeues the job' do + job.should_receive(:requeue).with('my-queue', anything) + perform + end + + it 'uses a random delay from the delay_range' do + job.should_receive(:requeue) do |qname, hash| + expect(qname).to eq('my-queue') + expect(hash[:delay]).to be_between(delay_range.min, delay_range.max) + end + perform + end + + it 'tracks the number of requeues for this error' do + expected_first_time = { + 'requeues_by_exception' => { exception_name => 1 } } + job.should_receive(:requeue).with('my-queue', hash_including( + data: expected_first_time + )) + perform + + job.data.merge!(expected_first_time) - it 'calls the callback' do - expect { - perform - }.to change { callback_catcher.size }.from(0).to(1) + job.should_receive(:requeue).with('my-queue', hash_including( + data: { 'requeues_by_exception' => { exception_name => 2 } } + )) + perform + end + + it 'preserves other requeues_by_exception values' do + job.data['requeues_by_exception'] = { 'SomeKlass' => 3 } + + job.should_receive(:requeue).with('my-queue', hash_including( + data: { + 'requeues_by_exception' => { + exception_name => 1, 'SomeKlass' => 3 + } } + )) + perform + end + + it 'preserves other data' do + job.data['foo'] = 3 + + job.should_receive(:requeue).with('my-queue', hash_including( + data: { + 'requeues_by_exception' => { exception_name => 1 }, + 'foo' => 3 } + )) + perform + end + + it 'allow the error to propogate after max_attempts' do + job.data['requeues_by_exception'] = { + exception_name => max_attempts } + job.should_not_receive(:requeue) + + expect { perform }.to raise_error(exception) + end + + context 'when an after requeue callback is set' do + before { set_requeue_callback } + + it 'calls the callback' do + expect { + perform + }.to change { callback_catcher.size }.from(0).to(1) + end end end - end - context "when a matched exception is raised" do - include_examples "requeues on matching exception", matched_exception_1, matched_exception_1.name do - define_method(:raise_exception) { raise matched_exception_1 } + context "when a matched exception is raised" do + include_examples "requeues on matching exception", matched_exception_1, matched_exception_1.name do + define_method(:raise_exception) { raise matched_exception_1 } + end end - end - context "when another matched exception is raised" do - include_examples "requeues on matching exception", matched_exception_2, matched_exception_2.name do - define_method(:raise_exception) { raise matched_exception_2 } + context "when another matched exception is raised" do + include_examples "requeues on matching exception", matched_exception_2, matched_exception_2.name do + define_method(:raise_exception) { raise matched_exception_2 } + end end - end - context "when a subclass of a matched exception is raised" do 
- exception = Class.new(matched_exception_1) - include_examples "requeues on matching exception", exception, matched_exception_1.name do - define_method(:raise_exception) { raise exception } + context "when a subclass of a matched exception is raised" do + exception = Class.new(matched_exception_1) + include_examples "requeues on matching exception", exception, matched_exception_1.name do + define_method(:raise_exception) { raise exception } + end end - end - context "when an exception is raised that matches a listed on using `===` but not `is_a?" do - let(:exception_instance) { ArgumentError.new("Bad foo") } + context "when an exception is raised that matches a listed on using `===` but not `is_a?" do + let(:exception_instance) { ArgumentError.new("Bad foo") } - before do - expect(exception_instance).not_to be_a(MessageSpecificException) - expect(MessageSpecificException).to be === exception_instance - end + before do + expect(exception_instance).not_to be_a(MessageSpecificException) + expect(MessageSpecificException).to be === exception_instance + end - include_examples "requeues on matching exception", MessageSpecificException, MessageSpecificException.name do - define_method(:raise_exception) { raise exception_instance } + include_examples "requeues on matching exception", MessageSpecificException, MessageSpecificException.name do + define_method(:raise_exception) { raise exception_instance } + end end end end From 41859e708c4109d7da006502bc54fd0d3b38349d Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Wed, 6 May 2015 13:29:26 -0400 Subject: [PATCH 66/83] fix bad merge --- lib/qless/middleware/retry_exceptions.rb | 21 ++++--- spec/unit/middleware/retry_exceptions_spec.rb | 55 ++++++++++++++----- 2 files changed, 51 insertions(+), 25 deletions(-) diff --git a/lib/qless/middleware/retry_exceptions.rb b/lib/qless/middleware/retry_exceptions.rb index baa7c68e..2839551c 100644 --- a/lib/qless/middleware/retry_exceptions.rb +++ b/lib/qless/middleware/retry_exceptions.rb @@ -48,19 +48,18 @@ def on_retry_callback @on_retry_callback ||= DEFAULT_ON_RETRY_CALLBACK end - def exponential(base, options = {}) + # If `factor` is omitted it is set to `delay_seconds` to reproduce legacy + # behavior. + def exponential(delay_seconds, options={}) + factor = options.fetch(:factor, delay_seconds) fuzz_factor = options.fetch(:fuzz_factor, 0) - lambda do |num, _error| - unfuzzed = base**num - - fuzz = 0 - unless fuzz_factor.zero? - max_fuzz = unfuzzed * fuzz_factor - fuzz = rand(max_fuzz) * [1, -1].sample - end - - unfuzzed + fuzz + lambda do |retry_no, error| + unfuzzed = delay_seconds * factor**(retry_no - 1) + return unfuzzed if fuzz_factor.zero? 
+ r = 2 * rand - 1 + # r is uniformly distributed in range [-1, 1] + unfuzzed * (1 + fuzz_factor * r) end end end diff --git a/spec/unit/middleware/retry_exceptions_spec.rb b/spec/unit/middleware/retry_exceptions_spec.rb index 459d0f06..f548b86a 100644 --- a/spec/unit/middleware/retry_exceptions_spec.rb +++ b/spec/unit/middleware/retry_exceptions_spec.rb @@ -160,34 +160,61 @@ def perform_and_track_delays end context 'with an exponential backoff retry strategy' do - before do + it 'generates an exponential delay' do container.instance_eval do use_backoff_strategy exponential(10) end - end - it 'uses an exponential delay' do delays = perform_and_track_delays + expect(delays).to eq([10, 100, 1_000, 10_000, 100_000]) end - end - context 'with an exponential backoff retry strategy and fuzz factor' do - before do + it 'generates an exponential delay using explicitly given factor' do container.instance_eval do - use_backoff_strategy exponential(10, fuzz_factor: 0.5) + use_backoff_strategy exponential(10, factor: 3) end + + delays = perform_and_track_delays + + expect(delays).to eq([10, 30, 90, 270, 810]) end - it 'adds some randomness to fuzz it' do + it 'when fuzz_factor given, dissipate delays over range' do + container.instance_eval do + use_backoff_strategy exponential(10, fuzz_factor: 0.3) + end + + delays = perform_and_track_delays + + [10, 100, 1_000, 10_000, 100_000].zip(delays).each do |unfuzzed, actual| + expect(actual).not_to eq(unfuzzed) + expect(actual).to be_within(30).percent_of(unfuzzed) + end + end + + it 'combines factor and fuzz_factor' do + container.instance_eval do + use_backoff_strategy exponential(100, factor: 2, fuzz_factor: 0.2) + end + + delays = perform_and_track_delays + + [100, 200, 400, 800, 1600].zip(delays).each do |unfuzzed, actual| + expect(actual).not_to eq(unfuzzed) + expect(actual).to be_within(20).percent_of(unfuzzed) + end + end + + it 'can be reused by multiple jobs' do + container.instance_eval do + use_backoff_strategy exponential(10, factor: 2) + end + perform_and_track_delays + delays = perform_and_track_delays - expect(delays).not_to eq([10, 100, 1_000, 10_000, 100_000]) - expect(delays[0]).to be_within(50).percent_of(10) - expect(delays[1]).to be_within(50).percent_of(100) - expect(delays[2]).to be_within(50).percent_of(1_000) - expect(delays[3]).to be_within(50).percent_of(10_000) - expect(delays[4]).to be_within(50).percent_of(100_000) + expect(delays).to eq([10, 20, 40, 80, 160]) end end end From 02ff8a4993d49a1f9dd18ce674aaa5dedd921ab0 Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Wed, 6 May 2015 13:30:26 -0400 Subject: [PATCH 67/83] fix bad merge --- lib/qless/lua/qless-lib.lua | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua index 58a02c7d..80e1f385 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: 6451b7cbecbae484d32686e64e1d02378ad383f7 +-- Current SHA: 5dbc192de654731c02f5e3ecb1ff00b00852121f -- This is a generated file ------------------------------------------------------------------------------- -- Forward declarations to make everything happy @@ -590,11 +590,11 @@ end -- ('depends', : Json of jobs it depends on in the new queue -- '["jid1", "jid2", ...]') --- -function QlessJob:complete(now, worker, queue, data, ...) +function QlessJob:complete(now, worker, queue, raw_data, ...) 
assert(worker, 'Complete(): Arg "worker" missing') assert(queue , 'Complete(): Arg "queue" missing') - data = assert(cjson.decode(data), - 'Complete(): Arg "data" missing or not JSON: ' .. tostring(data)) + local data = assert(cjson.decode(raw_data), + 'Complete(): Arg "data" missing or not JSON: ' .. tostring(raw_data)) -- Read in all the optional parameters local options = {} @@ -645,8 +645,8 @@ function QlessJob:complete(now, worker, queue, data, ...) -- update history self:history(now, 'done') - if data then - redis.call('hset', QlessJob.ns .. self.jid, 'data', cjson.encode(data)) + if raw_data then + redis.call('hset', QlessJob.ns .. self.jid, 'data', raw_data) end -- Remove the job from the previous queue @@ -2668,7 +2668,7 @@ function QlessWorker.counts(now, worker) return response end end --- Retrieve the data fro a throttled resource +-- Retrieve the data for a throttled resource function QlessThrottle:data() -- Default values for the data local data = { From 3fc6de11bd20c3415f4b7b2c580923865cdddfd3 Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Wed, 6 May 2015 13:35:17 -0400 Subject: [PATCH 68/83] wip --- lib/qless/job.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/qless/job.rb b/lib/qless/job.rb index 8919dccb..f99834a9 100644 --- a/lib/qless/job.rb +++ b/lib/qless/job.rb @@ -249,7 +249,7 @@ def requeue(queue, opts = {}) note_state_change :requeue do @client.call('requeue', @client.worker_name, queue, @jid, @klass_name, JSON.generate(opts.fetch(:data, @data)), - *self.class.build_opts_array(opts) + *self.class.build_opts_array(job.to_hash.merge(opts)) ) end end From 4a013c22335e75ce22ed70fb94cc124ff17dcaf3 Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Fri, 8 May 2015 12:32:48 -0400 Subject: [PATCH 69/83] test fixes, cleanup --- lib/qless/job.rb | 25 ++++++++++++++++++++++--- lib/qless/queue.rb | 4 ++-- spec/unit/job_spec.rb | 18 ++++++++++++++---- 3 files changed, 38 insertions(+), 9 deletions(-) diff --git a/lib/qless/job.rb b/lib/qless/job.rb index f99834a9..c489eb32 100644 --- a/lib/qless/job.rb +++ b/lib/qless/job.rb @@ -115,7 +115,8 @@ def self.build(client, klass, attributes = {}) # format the qless api expects. 
  def self.build_opts_array(opts)
     result = []
-    result << opts.fetch(:delay, 0)
+    result << JSON.generate(opts.fetch(:data, {}))
+    result.concat([opts.fetch(:delay, 0)])
     result.concat(['priority', opts.fetch(:priority, 0)])
     result.concat(['tags', JSON.generate(opts.fetch(:tags, []))])
     result.concat(['retries', opts.fetch(:retries, 5)])
@@ -244,12 +245,30 @@ def to_hash
     }
   end
 
+  # Extract the enqueue options from the job
+  # @return [Hash] options
+  # @option options [Integer] :retries
+  # @option options [Integer] :priority
+  # @option options [Array] :depends
+  # @option options [Array] :tags
+  # @option options [Array] :throttles
+  # @option options [Hash] :data
+  def enqueue_opts
+    {
+      retries: original_retries,
+      priority: priority,
+      depends: dependents,
+      tags: tags,
+      throttles: throttles,
+      data: data,
+    }
+  end
+
   # Move this from its current queue into another
   def requeue(queue, opts = {})
     note_state_change :requeue do
       @client.call('requeue', @client.worker_name, queue, @jid, @klass_name,
-        JSON.generate(opts.fetch(:data, @data)),
-        *self.class.build_opts_array(job.to_hash.merge(opts)
+        *self.class.build_opts_array(self.enqueue_opts.merge(opts))
       )
     end
   end
diff --git a/lib/qless/queue.rb b/lib/qless/queue.rb
index a6accc25..b4476c8b 100644
--- a/lib/qless/queue.rb
+++ b/lib/qless/queue.rb
@@ -101,6 +101,7 @@ def unpause
   # => priority (int)
   # => tags (array of strings)
   # => delay (int)
+  # => throttles (array of strings)
   def put(klass, data, opts = {})
     opts = job_options(klass, data, opts)
     @client.call(
@@ -108,8 +109,7 @@ def put(klass, data, opts = {})
       worker_name, @name,
       (opts[:jid] || Qless.generate_jid), klass.is_a?(String) ? klass : klass.name,
-      JSON.generate(data),
-      *Job.build_opts_array(opts),
+      *Job.build_opts_array(opts.merge(:data => data)),
     )
   end
diff --git a/spec/unit/job_spec.rb b/spec/unit/job_spec.rb
index 16a07e36..2251dcd6 100644
--- a/spec/unit/job_spec.rb
+++ b/spec/unit/job_spec.rb
@@ -38,12 +38,22 @@ def around_perform(job)
   end
 
   describe '.build_opts_array' do
-    it 'should return a correctly build array' do
+    it 'should return a correctly built array' do
-      # [delay, priority, priority_value, tags, tags_value, retries, retries_value, depends, depends_value,
+      # [data, delay, priority, priority_value, tags, tags_value, retries, retries_value, depends, depends_value,
       #  throttles, throttles_value]
-      expected = [0, "priority", 0, "tags", "[]", "retries", 5, "depends", "[]", "throttles", "[]"]
+      expected = ["{}", 0, "priority", 0, "tags", "[]", "retries", 5, "depends", "[]", "throttles", "[]"]
       job = Job.build(client, JobClass)
-      expect(Job.build_opts_array(job.to_hash)).to eq(expected)
+      expect(Job.build_opts_array(job.enqueue_opts)).to eq(expected)
+    end
+  end
+
+  describe '.enqueue_opts' do
+    it 'returns the available fields for enqueuing the job' do
+      job = Job.build(client, JobClass)
+      opts = job.enqueue_opts
+      [:data, :priority, :tags, :retries, :depends, :throttles].each do |k|
+        expect(opts.has_key?(k)).to(be(true))
+      end
     end
   end

From b1eed41cdba0e8cdcf9084ee5adae17cf685ed31 Mon Sep 17 00:00:00 2001
From: james-lawrence
Date: Mon, 11 May 2015 08:46:08 -0400
Subject: [PATCH 70/83] strong test

---
 spec/unit/job_spec.rb | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/spec/unit/job_spec.rb b/spec/unit/job_spec.rb
index 2251dcd6..fb135470 100644
--- a/spec/unit/job_spec.rb
+++ b/spec/unit/job_spec.rb
@@ -49,11 +49,14 @@ def around_perform(job)
 
   describe '.enqueue_opts' do
     it 'returns the available fields for enqueuing the job' do
+      expected_fields =
[:data, :priority, :tags, :retries, :depends, :throttles] job = Job.build(client, JobClass) opts = job.enqueue_opts - [:data, :priority, :tags, :retries, :depends, :throttles].each do |k| + expected_fields.each do |k| expect(opts.has_key?(k)).to(be(true)) end + + expect(opts.keys.length).to(equal(expected_fields.length)) end end From 7e6592eb26fb7ba864c8c1449cfe2d9b3e86e028 Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Tue, 12 May 2015 08:29:41 -0400 Subject: [PATCH 71/83] peer review --- lib/qless/job.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/qless/job.rb b/lib/qless/job.rb index c489eb32..28ddcda7 100644 --- a/lib/qless/job.rb +++ b/lib/qless/job.rb @@ -268,7 +268,7 @@ def enqueue_opts def requeue(queue, opts = {}) note_state_change :requeue do @client.call('requeue', @client.worker_name, queue, @jid, @klass_name, - *self.class.build_opts_array(self.enqueue_opts.merge(opts)) + *self.class.build_opts_array(self.enqueue_opts.merge!(opts)) ) end end From 7239d218f903af75a5ed743dc2b309f8e5ca3ce5 Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Wed, 20 May 2015 07:51:58 -0400 Subject: [PATCH 72/83] wip --- lib/qless/worker/base.rb | 14 +++++--------- lib/qless/worker/serial.rb | 24 ++++++++++++------------ 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/lib/qless/worker/base.rb b/lib/qless/worker/base.rb index 980b06ac..a2da79f2 100644 --- a/lib/qless/worker/base.rb +++ b/lib/qless/worker/base.rb @@ -195,16 +195,12 @@ def on_current_job_lock_lost(&block) @on_current_job_lock_lost = block end - def listen_for_lost_lock + def listen_for_lost_lock(job) # Ensure subscribers always has a value - subscribers = [] - subscribers = uniq_clients.map do |client| - Subscriber.start(client, "ql:w:#{client.worker_name}", log: @log) do |_, message| + subscriber = Subscriber.start(job.client, "ql:w:#{client.worker_name}", log: @log) do |_, message| if message['event'] == 'lock_lost' - with_current_job do |job| - if job && message['jid'] == job.jid - @on_current_job_lock_lost.call(job) - end + if message['jid'] == job.jid + @on_current_job_lock_lost.call(job) end end end @@ -212,7 +208,7 @@ def listen_for_lost_lock yield ensure - subscribers.each(&:stop) + subscriber.stop end private diff --git a/lib/qless/worker/serial.rb b/lib/qless/worker/serial.rb index 3530574a..a7f2273f 100644 --- a/lib/qless/worker/serial.rb +++ b/lib/qless/worker/serial.rb @@ -19,21 +19,21 @@ def run reserver.prep_for_work! 
- listen_for_lost_lock do - procline "Running #{reserver.description}" + procline "Running #{reserver.description}" - jobs.each do |job| - # Run the job we're working on - log(:debug, "Starting job #{job.klass_name} (#{job.jid} from #{job.queue_name})") - procline "Processing #{job.description}" + jobs.each do |job| + # Run the job we're working on + log(:debug, "Starting job #{job.klass_name} (#{job.jid} from #{job.queue_name})") + procline "Processing #{job.description}" + listen_for_lost_lock(job) do perform(job) - log(:debug, "Finished job #{job.klass_name} (#{job.jid} from #{job.queue_name})") + end + log(:debug, "Finished job #{job.klass_name} (#{job.jid} from #{job.queue_name})") - # So long as we're paused, we should wait - while paused - log(:debug, 'Paused...') - sleep interval - end + # So long as we're paused, we should wait + while paused + log(:debug, 'Paused...') + sleep interval end end end From 0e20d3b850027cbd24fdd8ad3afd0bb3d31fd022 Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Wed, 20 May 2015 07:56:52 -0400 Subject: [PATCH 73/83] wip --- lib/qless/worker/base.rb | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/qless/worker/base.rb b/lib/qless/worker/base.rb index a2da79f2..a2f17668 100644 --- a/lib/qless/worker/base.rb +++ b/lib/qless/worker/base.rb @@ -198,17 +198,16 @@ def on_current_job_lock_lost(&block) def listen_for_lost_lock(job) # Ensure subscribers always has a value subscriber = Subscriber.start(job.client, "ql:w:#{client.worker_name}", log: @log) do |_, message| - if message['event'] == 'lock_lost' - if message['jid'] == job.jid - @on_current_job_lock_lost.call(job) - end + if message['event'] == 'lock_lost' + if message['jid'] == job.jid + @on_current_job_lock_lost.call(job) end end end yield ensure - subscriber.stop + subscriber.try(:stop) end private From 55e770e39258b25f3dd3417f3291a4c326635332 Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Wed, 20 May 2015 08:02:41 -0400 Subject: [PATCH 74/83] wip --- lib/qless/worker/base.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/qless/worker/base.rb b/lib/qless/worker/base.rb index a2f17668..2ff1594d 100644 --- a/lib/qless/worker/base.rb +++ b/lib/qless/worker/base.rb @@ -197,7 +197,7 @@ def on_current_job_lock_lost(&block) def listen_for_lost_lock(job) # Ensure subscribers always has a value - subscriber = Subscriber.start(job.client, "ql:w:#{client.worker_name}", log: @log) do |_, message| + subscriber = Subscriber.start(job.client, "ql:w:#{job.client.worker_name}", log: @log) do |_, message| if message['event'] == 'lock_lost' if message['jid'] == job.jid @on_current_job_lock_lost.call(job) @@ -207,7 +207,7 @@ def listen_for_lost_lock(job) yield ensure - subscriber.try(:stop) + subscriber && subscriber.stop end private From 2dbbb49e2acc88ead3dae09f93c57569c4d44bfc Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Wed, 20 May 2015 09:59:16 -0400 Subject: [PATCH 75/83] test fix --- spec/integration/workers/serial_spec.rb | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/spec/integration/workers/serial_spec.rb b/spec/integration/workers/serial_spec.rb index 220bcb57..a361600a 100644 --- a/spec/integration/workers/serial_spec.rb +++ b/spec/integration/workers/serial_spec.rb @@ -153,9 +153,11 @@ def self.perform(job) callback_invoked = false worker.on_current_job_lock_lost { callback_invoked = true } + queue.put('JobClass', {}) queue.put('JobClass', {}) - worker.listen_for_lost_lock do + job = queue.pop + 
worker.listen_for_lost_lock(job) do queue.pop.timeout end From 9f64571634c9bfe66a64589b0e6eab732786b11d Mon Sep 17 00:00:00 2001 From: james-lawrence Date: Wed, 20 May 2015 10:10:44 -0400 Subject: [PATCH 76/83] combine if statements --- lib/qless/worker/base.rb | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/qless/worker/base.rb b/lib/qless/worker/base.rb index 2ff1594d..2de3adf8 100644 --- a/lib/qless/worker/base.rb +++ b/lib/qless/worker/base.rb @@ -198,10 +198,8 @@ def on_current_job_lock_lost(&block) def listen_for_lost_lock(job) # Ensure subscribers always has a value subscriber = Subscriber.start(job.client, "ql:w:#{job.client.worker_name}", log: @log) do |_, message| - if message['event'] == 'lock_lost' - if message['jid'] == job.jid - @on_current_job_lock_lost.call(job) - end + if message['event'] == 'lock_lost' && message['jid'] == job.jid + @on_current_job_lock_lost.call(job) end end From 70c68e80149ebd9bebd429e05f644c13dc7da2c2 Mon Sep 17 00:00:00 2001 From: Matt Conway Date: Thu, 23 Jul 2015 12:20:49 -0400 Subject: [PATCH 77/83] update from qless-core popop_retry branch - retry pop up to config limit when pop quantity would be unfulfilled due to throttling --- lib/qless/lua/qless-lib.lua | 47 +++++++++++++++++++++++++++---------- lib/qless/lua/qless.lua | 38 +++++++++++++++++++++--------- 2 files changed, 61 insertions(+), 24 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua index 80e1f385..08fabacc 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: 5dbc192de654731c02f5e3ecb1ff00b00852121f +-- Current SHA: 98b71d6e188f20c34045bb296a460ed6d53df68d -- This is a generated file ------------------------------------------------------------------------------- -- Forward declarations to make everything happy @@ -1741,21 +1741,42 @@ function QlessQueue:pop(now, worker, count) -- With these in place, we can expand this list of jids based on the work -- queue itself and the priorities therein - local jids = self.work.peek(count - #dead_jids) or {} - for index, jid in ipairs(jids) do - local job = Qless.job(jid) - if job:throttles_acquire(now) then - self:pop_job(now, worker, job) - table.insert(popped, jid) - else - self:throttle(now, job) + -- Since throttles could prevent work queue items from being popped, we can + -- retry a number of times till we find work items that are not throttled + local pop_retry_limit = tonumber( + Qless.config.get(self.name .. 
'-max-pop-retry') or
+    Qless.config.get('max-pop-retry', 1)
+  )
+
+  -- Keep trying to fulfill jobs from the work queue until we reach
+  -- the desired count or exhaust our retry limit
+  while #popped < count and pop_retry_limit > 0 do
+
+    local jids = self.work.peek(count - #popped) or {}
+
+    -- If there is nothing in the work queue, then no need to keep looping
+    if #jids == 0 then
+      break
     end
-  end
-  -- All jobs should have acquired locks or be throttled,
-  -- ergo, remove all jids from work queue
-  self.work.remove(unpack(jids))
+
+    for index, jid in ipairs(jids) do
+      local job = Qless.job(jid)
+      if job:throttles_acquire(now) then
+        self:pop_job(now, worker, job)
+        table.insert(popped, jid)
+      else
+        self:throttle(now, job)
+      end
+    end
+
+    -- All jobs should have acquired locks or be throttled,
+    -- ergo, remove all jids from work queue
+    self.work.remove(unpack(jids))
+
+    pop_retry_limit = pop_retry_limit - 1
+  end
 
   return popped
 end
diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua
index d17978c2..b286a146 100644
--- a/lib/qless/lua/qless.lua
+++ b/lib/qless/lua/qless.lua
@@ -1,4 +1,4 @@
--- Current SHA: 5dbc192de654731c02f5e3ecb1ff00b00852121f
+-- Current SHA: 98b71d6e188f20c34045bb296a460ed6d53df68d
 -- This is a generated file
 local Qless = {
   ns = 'ql:'
@@ -1286,19 +1286,35 @@ function QlessQueue:pop(now, worker, count)
   self:check_scheduled(now, count - #dead_jids)
 
-  local jids = self.work.peek(count - #dead_jids) or {}
-  for index, jid in ipairs(jids) do
-    local job = Qless.job(jid)
-    if job:throttles_acquire(now) then
-      self:pop_job(now, worker, job)
-      table.insert(popped, jid)
-    else
-      self:throttle(now, job)
+  local pop_retry_limit = tonumber(
+    Qless.config.get(self.name .. '-max-pop-retry') or
+    Qless.config.get('max-pop-retry', 1)
+  )
+
+  while #popped < count and pop_retry_limit > 0 do
+
+    local jids = self.work.peek(count - #popped) or {}
+
+    if #jids == 0 then
+      break
     end
-  end
-  self.work.remove(unpack(jids))
+
+    for index, jid in ipairs(jids) do
+      local job = Qless.job(jid)
+      if job:throttles_acquire(now) then
+        self:pop_job(now, worker, job)
+        table.insert(popped, jid)
+      else
+        self:throttle(now, job)
+      end
+    end
+
+    self.work.remove(unpack(jids))
+
+    pop_retry_limit = pop_retry_limit - 1
+  end
 
   return popped
 end

From d13d38975f3f5f6fcd19a4b05c338636cd629e69 Mon Sep 17 00:00:00 2001
From: Gregory Salmon
Date: Fri, 20 Feb 2015 18:11:20 -0500
Subject: [PATCH 78/83] dont inherit when looking up constants

---
 lib/qless/job.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/qless/job.rb b/lib/qless/job.rb
index ce4c0325..5868aef1 100644
--- a/lib/qless/job.rb
+++ b/lib/qless/job.rb
@@ -18,7 +18,7 @@ def initialize(client, jid)
 
   def klass
     @klass ||= @klass_name.split('::').reduce(Object) do |context, name|
-      context.const_get(name)
+      context.const_get(name, false)
     end
   end

From 5425a0f6c4542e8e6574d5332b32c03a1ad6eb1d Mon Sep 17 00:00:00 2001
From: james-lawrence
Date: Thu, 10 Sep 2015 11:14:23 -0400
Subject: [PATCH 79/83] update core

---
 lib/qless/lua/qless-lib.lua |  2 +-
 lib/qless/lua/qless.lua     | 10 +++++++++-
 lib/qless/qless-core        |  2 +-
 3 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua
index 08fabacc..0c3235bd 100644
--- a/lib/qless/lua/qless-lib.lua
+++ b/lib/qless/lua/qless-lib.lua
@@ -1,4 +1,4 @@
--- Current SHA: 98b71d6e188f20c34045bb296a460ed6d53df68d
+-- Current SHA: e3fdb7eca308805afceca302cbe0f4ea64c3624e
 -- This is a generated file
------------------------------------------------------------------------------- -- Forward declarations to make everything happy diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua index b286a146..70c578f1 100644 --- a/lib/qless/lua/qless.lua +++ b/lib/qless/lua/qless.lua @@ -1,4 +1,4 @@ --- Current SHA: 98b71d6e188f20c34045bb296a460ed6d53df68d +-- Current SHA: e3fdb7eca308805afceca302cbe0f4ea64c3624e -- This is a generated file local Qless = { ns = 'ql:' @@ -2306,6 +2306,14 @@ QlessAPI['throttle.ttl'] = function(now, tid) return Qless.throttle(tid):ttl() end +QlessAPI['throttle.release'] = function(now, tid, ...) + local throttle = Qless.throttle(tid) + + for _, jid in ipairs(arg) do + throttle:release(now, jid) + end +end + if #KEYS > 0 then error('No Keys should be provided') end local command_name = assert(table.remove(ARGV, 1), 'Must provide a command') diff --git a/lib/qless/qless-core b/lib/qless/qless-core index 5dbc192d..e3fdb7ec 160000 --- a/lib/qless/qless-core +++ b/lib/qless/qless-core @@ -1 +1 @@ -Subproject commit 5dbc192de654731c02f5e3ecb1ff00b00852121f +Subproject commit e3fdb7eca308805afceca302cbe0f4ea64c3624e From df8a785fb19ec38a1b3686c063a53d2bd9f5b5bd Mon Sep 17 00:00:00 2001 From: Danny Guinther Date: Tue, 5 Jul 2016 13:05:06 -0400 Subject: [PATCH 80/83] Use the backupify version of qless-core --- .gitmodules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index f960b50c..0c543cd9 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,3 @@ [submodule "lib/qless/qless-core"] path = lib/qless/qless-core - url = https://github.com/seomoz/qless-core.git + url = https://github.com/backupify/qless-core.git From 77585b1e914c3e35e842d4d3c355f4de56993949 Mon Sep 17 00:00:00 2001 From: Danny Guinther Date: Tue, 5 Jul 2016 13:06:37 -0400 Subject: [PATCH 81/83] Update qless-core --- lib/qless/lua/qless-lib.lua | 25 +++++++++++++++++++------ lib/qless/lua/qless.lua | 22 ++++++++++++++++------ lib/qless/qless-core | 2 +- 3 files changed, 36 insertions(+), 13 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua index 0c3235bd..a8356e8e 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: e3fdb7eca308805afceca302cbe0f4ea64c3624e +-- Current SHA: 6dbf028a915fb8c9b1df37310659adc8dc1762ca -- This is a generated file ------------------------------------------------------------------------------- -- Forward declarations to make everything happy @@ -1717,8 +1717,11 @@ function QlessQueue:pop(now, worker, count) local popped = {} for index, jid in ipairs(dead_jids) do - self:pop_job(now, worker, Qless.job(jid)) - table.insert(popped, jid) + local success = self:pop_job(now, worker, Qless.job(jid)) + -- only track jid if a job was popped and it's not a phantom jid + if success then + table.insert(popped, jid) + end end -- if queue is at max capacity don't pop any further jobs. 
@@ -1764,8 +1767,11 @@ function QlessQueue:pop(now, worker, count) for index, jid in ipairs(jids) do local job = Qless.job(jid) if job:throttles_acquire(now) then - self:pop_job(now, worker, job) - table.insert(popped, jid) + local success = self:pop_job(now, worker, job) + -- only track jid if a job was popped and it's not a phantom jid + if success then + table.insert(popped, jid) + end else self:throttle(now, job) end @@ -1795,7 +1801,13 @@ end function QlessQueue:pop_job(now, worker, job) local state local jid = job.jid - state = unpack(job:data('state')) + local job_state = job:data('state') + -- if the job doesn't exist, short circuit + if not job_state then + return false + end + + state = unpack(job_state) job:history(now, 'popped', {worker = worker}) -- We should find the heartbeat interval for this queue heartbeat @@ -1827,6 +1839,7 @@ function QlessQueue:pop_job(now, worker, job) if tracked then Qless.publish('popped', jid) end + return true end -- Update the stats for this queue diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua index 70c578f1..7eef0774 100644 --- a/lib/qless/lua/qless.lua +++ b/lib/qless/lua/qless.lua @@ -1,4 +1,4 @@ --- Current SHA: e3fdb7eca308805afceca302cbe0f4ea64c3624e +-- Current SHA: 6dbf028a915fb8c9b1df37310659adc8dc1762ca -- This is a generated file local Qless = { ns = 'ql:' @@ -1273,8 +1273,10 @@ function QlessQueue:pop(now, worker, count) local popped = {} for index, jid in ipairs(dead_jids) do - self:pop_job(now, worker, Qless.job(jid)) - table.insert(popped, jid) + local success = self:pop_job(now, worker, Qless.job(jid)) + if success then + table.insert(popped, jid) + end end if not Qless.throttle(QlessQueue.ns .. self.name):available() then @@ -1304,8 +1306,10 @@ function QlessQueue:pop(now, worker, count) for index, jid in ipairs(jids) do local job = Qless.job(jid) if job:throttles_acquire(now) then - self:pop_job(now, worker, job) - table.insert(popped, jid) + local success = self:pop_job(now, worker, job) + if success then + table.insert(popped, jid) + end else self:throttle(now, job) end @@ -1332,7 +1336,12 @@ end function QlessQueue:pop_job(now, worker, job) local state local jid = job.jid - state = unpack(job:data('state')) + local job_state = job:data('state') + if not job_state then + return false + end + + state = unpack(job_state) job:history(now, 'popped', {worker = worker}) local expires = now + tonumber( @@ -1359,6 +1368,7 @@ function QlessQueue:pop_job(now, worker, job) if tracked then Qless.publish('popped', jid) end + return true end function QlessQueue:stat(now, stat, val) diff --git a/lib/qless/qless-core b/lib/qless/qless-core index e3fdb7ec..6dbf028a 160000 --- a/lib/qless/qless-core +++ b/lib/qless/qless-core @@ -1 +1 @@ -Subproject commit e3fdb7eca308805afceca302cbe0f4ea64c3624e +Subproject commit 6dbf028a915fb8c9b1df37310659adc8dc1762ca From 58a8b90325d2ada58708d0dcace1fccff74c2ac1 Mon Sep 17 00:00:00 2001 From: Danny Guinther Date: Fri, 8 Jul 2016 12:00:20 -0400 Subject: [PATCH 82/83] Update core to fix moved job throttles --- lib/qless/lua/qless-lib.lua | 9 ++++++++- lib/qless/lua/qless.lua | 8 +++++++- lib/qless/qless-core | 2 +- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/lib/qless/lua/qless-lib.lua b/lib/qless/lua/qless-lib.lua index a8356e8e..2b8a040c 100644 --- a/lib/qless/lua/qless-lib.lua +++ b/lib/qless/lua/qless-lib.lua @@ -1,4 +1,4 @@ --- Current SHA: 6dbf028a915fb8c9b1df37310659adc8dc1762ca +-- Current SHA: 20dc687832ad472f0a00899d26c285b893ff466c -- This is a generated file 
-------------------------------------------------------------------------------
 -- Forward declarations to make everything happy
@@ -1961,9 +1961,16 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...)
   job:history(now, 'put', {q = self.name})
 
   -- If this item was previously in another queue, then we should remove it from there
+  -- and remove the associated throttle
   if oldqueue then
     local queue_obj = Qless.queue(oldqueue)
     queue_obj:remove_job(jid)
+    local old_qid = QlessQueue.ns .. oldqueue
+    for index, tname in ipairs(throttles) do
+      if tname == old_qid then
+        table.remove(throttles, index)
+      end
+    end
   end
 
   -- If this had previously been given out to a worker, make sure to remove it
diff --git a/lib/qless/lua/qless.lua b/lib/qless/lua/qless.lua
index 7eef0774..0623d1e0 100644
--- a/lib/qless/lua/qless.lua
+++ b/lib/qless/lua/qless.lua
@@ -1,4 +1,4 @@
--- Current SHA: 6dbf028a915fb8c9b1df37310659adc8dc1762ca
+-- Current SHA: 20dc687832ad472f0a00899d26c285b893ff466c
 -- This is a generated file
 local Qless = {
   ns = 'ql:'
@@ -1462,6 +1462,12 @@ function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...)
   if oldqueue then
     local queue_obj = Qless.queue(oldqueue)
     queue_obj:remove_job(jid)
+    local old_qid = QlessQueue.ns .. oldqueue
+    for index, tname in ipairs(throttles) do
+      if tname == old_qid then
+        table.remove(throttles, index)
+      end
+    end
   end
 
   if oldworker and oldworker ~= '' then
diff --git a/lib/qless/qless-core b/lib/qless/qless-core
index 6dbf028a..20dc6878 160000
--- a/lib/qless/qless-core
+++ b/lib/qless/qless-core
@@ -1 +1 @@
-Subproject commit 6dbf028a915fb8c9b1df37310659adc8dc1762ca
+Subproject commit 20dc687832ad472f0a00899d26c285b893ff466c

From 91965f35d37d9954cab2541229c1a6c1cdc635ff Mon Sep 17 00:00:00 2001
From: Manni Wood
Date: Tue, 31 Oct 2017 15:52:46 -0400
Subject: [PATCH 83/83] No longer prints stack trace in qless error storage

---
 lib/qless/failure_formatter.rb | 18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/lib/qless/failure_formatter.rb b/lib/qless/failure_formatter.rb
index 868bb24c..4546c5a1 100644
--- a/lib/qless/failure_formatter.rb
+++ b/lib/qless/failure_formatter.rb
@@ -15,29 +15,25 @@ def initialize
       @replacements[ENV['GEM_HOME']] = '' if ENV.key?('GEM_HOME')
     end
 
+    # lib/qless/job.rb#fail shows us that qless, right down to the Lua scripts,
+    # is set up to expect both a group and a message for a failed job. So we
+    # can't stop storing failed jobs altogether. But, to save on precious RAM,
+    # we can stop recording the full stack trace and keep only a short,
+    # truncated error message.
    def format(job, error, lines_to_remove = caller(2))
      group = "#{job.klass_name}:#{error.class}"
-     message = "#{truncated_message(error)}\n\n" +
-               "#{format_failure_backtrace(error.backtrace, lines_to_remove)}"
+     message = truncated_message(error)
 
      Failure.new(group, message)
    end
 
    private
 
    # TODO: pull this out into a config option.
-   MAX_ERROR_MESSAGE_SIZE = 10_000
+   MAX_ERROR_MESSAGE_SIZE = 100
 
    def truncated_message(error)
      return error.message if error.message.length <= MAX_ERROR_MESSAGE_SIZE
      error.message.slice(0, MAX_ERROR_MESSAGE_SIZE) +
        "\n... (truncated due to length)"
    end
-
-   def format_failure_backtrace(error_backtrace, lines_to_remove)
-     (error_backtrace - lines_to_remove).map do |line|
-       @replacements.reduce(line) do |formatted, (original, new)|
-         formatted.sub(original, new)
-       end
-     end.join("\n")
-   end
  end
end
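
The retry/backoff change in PATCH 66 is easiest to see from the caller's side. The sketch below is illustrative only: the job class, queue name and exception are hypothetical, `retry_on` is assumed to be the middleware's existing exception-registration helper, and `Qless::Job::SupportsMiddleware` is assumed to be extended as usual so the middleware chain runs; only `exponential(delay_seconds, factor:, fuzz_factor:)` itself comes from the patch above.

    require 'qless'
    require 'qless/job'
    require 'qless/middleware/retry_exceptions'

    # Hypothetical job class, used only to illustrate the new backoff options.
    class SlowApiJob
      extend Qless::Job::SupportsMiddleware       # enables around_perform middleware
      extend Qless::Middleware::RetryExceptions

      # Requeue (rather than fail) when the worker raises this error.
      retry_on Timeout::Error

      # First retry after ~10s, then ~30s, ~90s, ... (delay_seconds * factor**(n - 1)),
      # each delay fuzzed multiplicatively within +/-25% of the unfuzzed value.
      use_backoff_strategy exponential(10, factor: 3, fuzz_factor: 0.25)

      def self.perform(job)
        # work that may raise Timeout::Error goes here
      end
    end

Calling `exponential(10)` with no options keeps the legacy behaviour (factor defaults to `delay_seconds`, giving delays of 10, 100, 1_000, ...), which is what the first example in the PATCH 66 spec asserts. The queue-side knob added in PATCH 77 is an ordinary qless config value, so something like `client.config['max-pop-retry'] = 3` (or `'<queue-name>-max-pop-retry'` for a single queue) bounds how many times a pop retries past throttled jobs; that setter assumes the usual `Config#[]=` accessor on the client.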