Hangfire deadlocks on job deletes

I am running into deadlocks on Hangfire job deletes (the expired-record cleanup). The deadlock graph XML is below:

<deadlock>
  <victim-list>
    <victimProcess id="processae7e82cf8" />
  </victim-list>
  <process-list>
    <process id="processae7e82cf8" taskpriority="0" logused="3957288" waitresource="KEY: 5:72057609169731584 (2d8fbb18e0a6)" waittime="2889" ownerId="318962622" transactionname="DELETE" lasttranstarted="2015-08-27T08:58:16.400" XDES="0xccd2383a8" lockMode="RangeS-U" schedulerid="2" kpid="3220" status="suspended" spid="184" sbid="0" ecid="0" priority="0" trancount="2" lastbatchstarted="2015-08-27T08:58:16.403" lastbatchcompleted="2015-08-27T08:58:16.390" lastattention="1900-01-01T00:00:00.390" clientapp="EmptorTelcoDebug" hostname="YULIYA" hostpid="5020" loginname="Telco" isolationlevel="read committed (2)" xactid="318962622" currentdb="5" lockTimeout="4294967295" clientoption1="671088672" clientoption2="128056">
      <executionStack>
        <frame procname="adhoc" line="3" stmtstart="150" sqlhandle="0x02000000197dd52e3e681c9f05d071b4ca8fe816e66af4250000000000000000000000000000000000000000">
delete top (@count) from HangFire.[Job] with (readpast) where ExpireAt &lt; @now;    </frame>
        <frame procname="unknown" line="1" sqlhandle="0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000">
unknown    </frame>
      </executionStack>
      <inputbuf>
(@now datetime,@count int)
set transaction isolation level read committed;
delete top (@count) from HangFire.[Job] with (readpast) where ExpireAt &lt; @now;   </inputbuf>
    </process>
    <process id="process1333069c38" taskpriority="0" logused="3967624" waitresource="KEY: 5:72057609169731584 (3c979127a6c9)" waittime="2889" ownerId="318962647" transactionname="DELETE" lasttranstarted="2015-08-27T08:58:16.413" XDES="0x1ec3d7f6a8" lockMode="RangeS-U" schedulerid="17" kpid="13648" status="suspended" spid="185" sbid="0" ecid="0" priority="0" trancount="2" lastbatchstarted="2015-08-27T08:58:16.420" lastbatchcompleted="2015-08-27T08:58:16.410" lastattention="1900-01-01T00:00:00.410" clientapp="EmptorTelcoDebug" hostname="YULIYA" hostpid="5020" loginname="Telco" isolationlevel="read committed (2)" xactid="318962647" currentdb="5" lockTimeout="4294967295" clientoption1="671088672" clientoption2="128056">
      <executionStack>
        <frame procname="adhoc" line="3" stmtstart="150" sqlhandle="0x02000000197dd52e3e681c9f05d071b4ca8fe816e66af4250000000000000000000000000000000000000000">
delete top (@count) from HangFire.[Job] with (readpast) where ExpireAt &lt; @now;    </frame>
        <frame procname="unknown" line="1" sqlhandle="0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000">
unknown    </frame>
      </executionStack>
      <inputbuf>
(@now datetime,@count int)
set transaction isolation level read committed;
delete top (@count) from HangFire.[Job] with (readpast) where ExpireAt &lt; @now;   </inputbuf>
    </process>
  </process-list>
  <resource-list>
    <keylock hobtid="72057609169731584" dbid="5" objectname="EmptorKocnetII.HangFire.State" indexname="IX_HangFire_State_JobId" id="lock18e5929600" mode="RangeS-U" associatedObjectId="72057609169731584">
      <owner-list>
        <owner id="process1333069c38" mode="RangeS-U" />
      </owner-list>
      <waiter-list>
        <waiter id="processae7e82cf8" mode="RangeS-U" requestType="wait" />
      </waiter-list>
    </keylock>
    <keylock hobtid="72057609169731584" dbid="5" objectname="EmptorKocnetII.HangFire.State" indexname="IX_HangFire_State_JobId" id="lock1c28549300" mode="RangeS-U" associatedObjectId="72057609169731584">
      <owner-list>
        <owner id="processae7e82cf8" mode="RangeS-U" />
      </owner-list>
      <waiter-list>
        <waiter id="process1333069c38" mode="RangeS-U" requestType="wait" />
      </waiter-list>
    </keylock>
  </resource-list>
</deadlock>
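
For what it's worth, both sessions are running the exact same statement, and the waits are RangeS-U key-range locks on IX_HangFire_State_JobId rather than on HangFire.Job itself. My guess (an assumption on my part, not confirmed against the Hangfire sources) is that the range locks come from the ON DELETE CASCADE foreign key from HangFire.State to HangFire.Job: SQL Server performs cascading referential actions with key-range locking even though the session is at READ COMMITTED, so READPAST lets the two batches skip each other's Job rows but they can still acquire overlapping ranges on the State index in opposite order and deadlock. The query below is a diagnostic sketch of mine (not part of Hangfire) that lists the cascading constraints referencing HangFire.Job:

-- List foreign keys that reference HangFire.Job and their delete actions;
-- a CASCADE action here would explain the range locks on the child index.
select
    fk.name                            as constraint_name,
    object_name(fk.parent_object_id)   as child_table,
    fk.delete_referential_action_desc  as on_delete
from sys.foreign_keys as fk
where fk.referenced_object_id = object_id('HangFire.Job');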

@caltuntas, thank you for the report. I’ve created an issue on GitHub and scheduled it for the next maintenance release.

@caltuntas, the fix is available in the 1.4.6 release.

Hi @odinserj,

Thanks for the fix!
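
For anyone who can't upgrade right away: one possible stopgap (my own sketch, not what 1.4.6 actually does) is to serialize the cleanup with an application lock so only one connection runs the delete at a time. The lock name, timeout, and parameter values below are illustrative:

-- Stopgap for pre-1.4.6: wrap the expired-job delete in an app lock so
-- two servers never run the cleanup concurrently.
set transaction isolation level read committed;

declare @now datetime = getutcdate();  -- Hangfire passes @now/@count as parameters;
declare @count int = 1000;             -- the values here are only illustrative

begin transaction;

declare @lock int;
exec @lock = sp_getapplock
    @Resource    = 'HangFire:ExpirationManager',  -- hypothetical lock name
    @LockMode    = 'Exclusive',
    @LockOwner   = 'Transaction',
    @LockTimeout = 5000;                          -- ms; give up and retry later

if @lock >= 0  -- 0 = granted immediately, 1 = granted after waiting
begin
    delete top (@count) from HangFire.[Job] with (readpast)
    where ExpireAt < @now;
end

commit transaction;  -- committing releases the transaction-owned app lock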