Job reentrancy avoidance proposal

In case someone else runs into the same problem: we went ahead and implemented our own JobFilterAttribute, which we use instead of DisableConcurrentExecutionAttribute. The code is mostly the same as proposed by @Domonkos (thx!):

using System;
using Hangfire.Common;
using Hangfire.Server;
using NLog;

/// <summary>
/// Attribute to skip a job execution if the same job is already running.
/// Mostly taken from: http://discuss.hangfire.io/t/job-reentrancy-avoidance-proposal/607
/// </summary>
public class SkipConcurrentExecutionAttribute : JobFilterAttribute, IServerFilter
{
	// GetCurrentClassLogger is NLog's API; swap in your own logging framework if needed.
	private static readonly Logger logger = LogManager.GetCurrentClassLogger();

	private readonly int _timeoutInSeconds;

	public SkipConcurrentExecutionAttribute(int timeoutInSeconds)
	{
		if (timeoutInSeconds < 0) throw new ArgumentException("Timeout argument value should not be negative.", "timeoutInSeconds");

		_timeoutInSeconds = timeoutInSeconds;
	}

	public void OnPerforming(PerformingContext filterContext)
	{
		// Use "Namespace.Type.Method" as the lock resource, so only one
		// instance of the same job method can run at a time.
		var resource = String.Format(
							"{0}.{1}",
							filterContext.Job.Type.FullName,
							filterContext.Job.Method.Name);

		var timeout = TimeSpan.FromSeconds(_timeoutInSeconds);

		try
		{
			// Try to acquire the lock within the configured timeout; the lock
			// is stashed in Items so OnPerformed can release it later.
			var distributedLock = filterContext.Connection.AcquireDistributedLock(resource, timeout);
			filterContext.Items["DistributedLock"] = distributedLock;
		}
		catch (Exception)
		{
			// Acquisition failed (typically a lock timeout): cancel this run
			// instead of queueing it behind the one that is already executing.
			filterContext.Canceled = true;
			logger.Warn("Cancelling run for {0} job, id: {1}", resource, filterContext.JobId);
		}
	}

	public void OnPerformed(PerformedContext filterContext)
	{
		if (!filterContext.Items.ContainsKey("DistributedLock"))
		{
			throw new InvalidOperationException("Cannot release a distributed lock: it was not acquired.");
		}

		// Dispose the lock so the next scheduled run is free to execute.
		var distributedLock = (IDisposable)filterContext.Items["DistributedLock"];
		distributedLock.Dispose();
	}
}
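
For completeness, here is a minimal sketch of how the attribute can be applied. The SyncJob class, the zero-second timeout and the cron expression are just illustrative, not part of the filter itself:

// Hypothetical job class for illustration. A timeout of 0 means a run is
// skipped immediately whenever another instance still holds the lock.
public class SyncJob
{
	[SkipConcurrentExecution(timeoutInSeconds: 0)]
	public void Run()
	{
		// long-running work ...
	}
}

// Registered as a recurring job, overlapping runs are now skipped
// instead of piling up behind each other:
// RecurringJob.AddOrUpdate<SyncJob>(x => x.Run(), Cron.Minutely());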