path: root/drivers/scsi/hpsa.c
author	Stephen M. Cameron <stephenmcameron@gmail.com>	2014-11-14 17:27:09 -0600
committer	Christoph Hellwig <hch@lst.de>	2014-11-20 09:11:25 +0100
commit	0cbf768ef834c810d1eab205f21a434b9356d329 (patch)
tree	874822da5ceeb78fc03bebd59e2b0ee545cc23e2 /drivers/scsi/hpsa.c
parent	a505b86fde5903944828fa04f775e79a6636791d (diff)
download	lwn-0cbf768ef834c810d1eab205f21a434b9356d329.tar.gz
	lwn-0cbf768ef834c810d1eab205f21a434b9356d329.zip
hpsa: use atomics for commands_outstanding
Use atomics for commands_outstanding instead of protecting with spin locks.

Signed-off-by: Don Brace <don.brace@pmcs.com>
Signed-off-by: Stephen M. Cameron <stephenmcameron@gmail.com>
Reviewed-by: Joe Handzik <joseph.t.handzik@hp.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'drivers/scsi/hpsa.c')
-rw-r--r--	drivers/scsi/hpsa.c	26
1 file changed, 9 insertions(+), 17 deletions(-)
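At its core, the patch swaps a counter that was incremented and decremented under h->lock for an atomic_t that can be updated without taking the lock. The sketch below is a minimal userspace analogy of that pattern using C11 <stdatomic.h>; the struct and function names (fake_ctlr, submit_one, complete_one) are invented for illustration, and the driver itself uses the kernel's atomic_t with atomic_inc()/atomic_dec()/atomic_read() rather than C11 atomics.

/*
 * Userspace analogy of the hpsa change, not kernel code: a counter that
 * used to be bumped under a spinlock becomes a lock-free atomic.  The
 * names below are invented for illustration; hpsa.c itself uses the
 * kernel's atomic_t with atomic_inc()/atomic_dec()/atomic_read().
 */
#include <stdatomic.h>
#include <stdio.h>

struct fake_ctlr {
	/* before the patch: plain int, guarded by a lock on every update */
	/* after the patch:  atomic counter, no lock needed for updates   */
	atomic_int commands_outstanding;
};

static void submit_one(struct fake_ctlr *h)
{
	/* was: spin_lock(); h->commands_outstanding++; spin_unlock(); */
	atomic_fetch_add(&h->commands_outstanding, 1);
}

static void complete_one(struct fake_ctlr *h)
{
	/* was: spin_lock(); h->commands_outstanding--; spin_unlock(); */
	atomic_fetch_sub(&h->commands_outstanding, 1);
}

int main(void)
{
	struct fake_ctlr h = { .commands_outstanding = 0 };

	submit_one(&h);
	submit_one(&h);
	complete_one(&h);

	/* equivalent of the sysfs read: just load the current value */
	printf("commands_outstanding: %d\n",
	       atomic_load(&h.commands_outstanding));	/* prints 1 */
	return 0;
}

As the finish_cmd() hunk below shows, the count is now read into a local variable and checked after dropping the lock, so it is only a snapshot; that is acceptable for the driver's heuristic of kicking I/O when the fifo was recently full and fewer than 5 commands remain outstanding.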
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index f028ae4a04be..c079bb94f86b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -394,7 +394,8 @@ static ssize_t host_show_commands_outstanding(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ctlr_info *h = shost_to_hba(shost);
- return snprintf(buf, 20, "%d\n", h->commands_outstanding);
+ return snprintf(buf, 20, "%d\n",
+ atomic_read(&h->commands_outstanding));
}
static ssize_t host_show_transport_mode(struct device *dev,
@@ -700,7 +701,6 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
{
u32 a;
struct reply_queue_buffer *rq = &h->reply_queue[q];
- unsigned long flags;
if (h->transMethod & CFGTBL_Trans_io_accel1)
return h->access.command_completed(h, q);
@@ -711,9 +711,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
a = rq->head[rq->current_entry];
rq->current_entry++;
- spin_lock_irqsave(&h->lock, flags);
- h->commands_outstanding--;
- spin_unlock_irqrestore(&h->lock, flags);
+ atomic_dec(&h->commands_outstanding);
} else {
a = FIFO_EMPTY;
}
@@ -5445,15 +5443,9 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
/* Put job onto the completed Q */
addQ(&h->cmpQ, c);
-
- /* Must increment commands_outstanding before unlocking
- * and submitting to avoid race checking for fifo full
- * condition.
- */
- h->commands_outstanding++;
-
- /* Tell the controller execute command */
+ atomic_inc(&h->commands_outstanding);
spin_unlock_irqrestore(&h->lock, *flags);
+ /* Tell the controller execute command */
h->access.submit_command(h, c);
spin_lock_irqsave(&h->lock, *flags);
}
@@ -5499,6 +5491,7 @@ static inline void finish_cmd(struct CommandList *c)
unsigned long flags;
int io_may_be_stalled = 0;
struct ctlr_info *h = c->h;
+ int count;
spin_lock_irqsave(&h->lock, flags);
removeQ(c);
@@ -5519,11 +5512,10 @@ static inline void finish_cmd(struct CommandList *c)
* want to get in a cycle where we call start_io every time
* through here.
*/
- if (unlikely(h->fifo_recently_full) &&
- h->commands_outstanding < 5)
- io_may_be_stalled = 1;
-
+ count = atomic_read(&h->commands_outstanding);
spin_unlock_irqrestore(&h->lock, flags);
+ if (unlikely(h->fifo_recently_full) && count < 5)
+ io_may_be_stalled = 1;
dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI