author     Steven Whitehouse <swhiteho@redhat.com>    2006-05-25 12:40:08 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>    2006-05-25 12:40:08 -0400
commit     c6a756795d5ba0637aae8da89dd11bb7e3a1ee74
tree       1c19f951f2604dbb6b867a6dcdf94d20c204cc5c    /drivers/infiniband
parent     382066da251132f768380f4852ed5afb72d88f80
parent     a8bd60705aa17a998516837d9c1e503ad4cbd7fc
Merge branch 'master'
Diffstat (limited to 'drivers/infiniband')
22 files changed, 174 insertions, 147 deletions
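The bulk of the drivers/infiniband/core changes below replace the old teardown idiom of atomic_dec() followed by wait_event(..., !atomic_read(&refcount)) with a struct completion that the final reference drop signals. A minimal sketch of that pattern follows; struct foo and its helpers are hypothetical, not any of the real objects in this diff:

#include <linux/completion.h>
#include <asm/atomic.h>		/* linux/atomic.h on current kernels */

struct foo {
	atomic_t refcount;
	struct completion comp;
};

static void foo_init(struct foo *f)
{
	atomic_set(&f->refcount, 1);
	init_completion(&f->comp);
}

/* The last put signals whoever is blocked in foo_destroy(). */
static void foo_deref(struct foo *f)
{
	if (atomic_dec_and_test(&f->refcount))
		complete(&f->comp);
}

static void foo_destroy(struct foo *f)
{
	foo_deref(f);			/* drop our own reference */
	wait_for_completion(&f->comp);	/* sleep until the last user is gone */
	/* now safe to free f */
}

A struct completion keeps the waker/waiter handshake inside a single primitive with its own lock, which is the documented-safe way to let the destroying thread free the object as soon as the wait returns.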
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 7cfedb8d9bcd..86fee43502cd 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -34,6 +34,8 @@
  *
  * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
  */
+
+#include <linux/completion.h>
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/idr.h>
@@ -122,7 +124,7 @@ struct cm_id_private {
     struct rb_node service_node;
     struct rb_node sidr_id_node;
     spinlock_t lock;    /* Do not acquire inside cm.lock */
-    wait_queue_head_t wait;
+    struct completion comp;
     atomic_t refcount;

     struct ib_mad_send_buf *msg;
@@ -159,7 +161,7 @@ static void cm_work_handler(void *data);
 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
 {
     if (atomic_dec_and_test(&cm_id_priv->refcount))
-        wake_up(&cm_id_priv->wait);
+        complete(&cm_id_priv->comp);
 }

 static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
@@ -559,7 +561,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
         goto error;

     spin_lock_init(&cm_id_priv->lock);
-    init_waitqueue_head(&cm_id_priv->wait);
+    init_completion(&cm_id_priv->comp);
     INIT_LIST_HEAD(&cm_id_priv->work_list);
     atomic_set(&cm_id_priv->work_count, -1);
     atomic_set(&cm_id_priv->refcount, 1);
@@ -724,8 +726,8 @@ retest:
     }

     cm_free_id(cm_id->local_id);
-    atomic_dec(&cm_id_priv->refcount);
-    wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
+    cm_deref_id(cm_id_priv);
+    wait_for_completion(&cm_id_priv->comp);
     while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
         cm_free_work(work);
     if (cm_id_priv->private_data && cm_id_priv->private_data_len)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 469b6923a2e2..5ad41a64314c 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -352,7 +352,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
     INIT_WORK(&mad_agent_priv->local_work, local_completions,
           mad_agent_priv);
     atomic_set(&mad_agent_priv->refcount, 1);
-    init_waitqueue_head(&mad_agent_priv->wait);
+    init_completion(&mad_agent_priv->comp);

     return &mad_agent_priv->agent;

@@ -467,7 +467,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
     mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
     mad_snoop_priv->agent.port_num = port_num;
     mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
-    init_waitqueue_head(&mad_snoop_priv->wait);
+    init_completion(&mad_snoop_priv->comp);
     mad_snoop_priv->snoop_index = register_snoop_agent(
                         &port_priv->qp_info[qpn],
                         mad_snoop_priv);
@@ -486,6 +486,18 @@ error1:
 }
 EXPORT_SYMBOL(ib_register_mad_snoop);

+static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
+{
+    if (atomic_dec_and_test(&mad_agent_priv->refcount))
+        complete(&mad_agent_priv->comp);
+}
+
+static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
+{
+    if (atomic_dec_and_test(&mad_snoop_priv->refcount))
+        complete(&mad_snoop_priv->comp);
+}
+
 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 {
     struct ib_mad_port_private *port_priv;
@@ -509,9 +521,8 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
     flush_workqueue(port_priv->wq);
     ib_cancel_rmpp_recvs(mad_agent_priv);

-    atomic_dec(&mad_agent_priv->refcount);
-    wait_event(mad_agent_priv->wait,
-           !atomic_read(&mad_agent_priv->refcount));
+    deref_mad_agent(mad_agent_priv);
+    wait_for_completion(&mad_agent_priv->comp);

     kfree(mad_agent_priv->reg_req);
     ib_dereg_mr(mad_agent_priv->agent.mr);
@@ -529,9 +540,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
     atomic_dec(&qp_info->snoop_count);
     spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

-    atomic_dec(&mad_snoop_priv->refcount);
-    wait_event(mad_snoop_priv->wait,
-           !atomic_read(&mad_snoop_priv->refcount));
+    deref_snoop_agent(mad_snoop_priv);
+    wait_for_completion(&mad_snoop_priv->comp);

     kfree(mad_snoop_priv);
 }
@@ -600,8 +610,7 @@ static void snoop_send(struct ib_mad_qp_info *qp_info,
         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
         mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
                             send_buf, mad_send_wc);
-        if (atomic_dec_and_test(&mad_snoop_priv->refcount))
-            wake_up(&mad_snoop_priv->wait);
+        deref_snoop_agent(mad_snoop_priv);
         spin_lock_irqsave(&qp_info->snoop_lock, flags);
     }
     spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
@@ -626,8 +635,7 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info,
         spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
         mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
                            mad_recv_wc);
-        if (atomic_dec_and_test(&mad_snoop_priv->refcount))
-            wake_up(&mad_snoop_priv->wait);
+        deref_snoop_agent(mad_snoop_priv);
         spin_lock_irqsave(&qp_info->snoop_lock, flags);
     }
     spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
@@ -968,8 +976,7 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf)

     free_send_rmpp_list(mad_send_wr);
     kfree(send_buf->mad);
-    if (atomic_dec_and_test(&mad_agent_priv->refcount))
-        wake_up(&mad_agent_priv->wait);
+    deref_mad_agent(mad_agent_priv);
 }
 EXPORT_SYMBOL(ib_free_send_mad);

@@ -1757,8 +1764,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
         mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
                               mad_recv_wc);
         if (!mad_recv_wc) {
-            if (atomic_dec_and_test(&mad_agent_priv->refcount))
-                wake_up(&mad_agent_priv->wait);
+            deref_mad_agent(mad_agent_priv);
             return;
         }
     }
@@ -1770,8 +1776,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
         if (!mad_send_wr) {
             spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
             ib_free_recv_mad(mad_recv_wc);
-            if (atomic_dec_and_test(&mad_agent_priv->refcount))
-                wake_up(&mad_agent_priv->wait);
+            deref_mad_agent(mad_agent_priv);
             return;
         }
         ib_mark_mad_done(mad_send_wr);
@@ -1790,8 +1795,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
     } else {
         mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
                            mad_recv_wc);
-        if (atomic_dec_and_test(&mad_agent_priv->refcount))
-            wake_up(&mad_agent_priv->wait);
+        deref_mad_agent(mad_agent_priv);
     }
 }

@@ -2021,8 +2025,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
                            mad_send_wc);

     /* Release reference on agent taken when sending */
-    if (atomic_dec_and_test(&mad_agent_priv->refcount))
-        wake_up(&mad_agent_priv->wait);
+    deref_mad_agent(mad_agent_priv);
     return;
 done:
     spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 6c9c133d71ef..b4fa28d3160f 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -37,6 +37,7 @@
 #ifndef __IB_MAD_PRIV_H__
 #define __IB_MAD_PRIV_H__

+#include <linux/completion.h>
 #include <linux/pci.h>
 #include <linux/kthread.h>
 #include <linux/workqueue.h>
@@ -108,7 +109,7 @@ struct ib_mad_agent_private {
     struct list_head rmpp_list;

     atomic_t refcount;
-    wait_queue_head_t wait;
+    struct completion comp;
 };

 struct ib_mad_snoop_private {
@@ -117,7 +118,7 @@ struct ib_mad_snoop_private {
     int snoop_index;
     int mad_snoop_flags;
     atomic_t refcount;
-    wait_queue_head_t wait;
+    struct completion comp;
 };

 struct ib_mad_send_wr_private {
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index dfd4e588ce03..d4704e054e30 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -49,7 +49,7 @@ struct mad_rmpp_recv {
     struct list_head list;
     struct work_struct timeout_work;
     struct work_struct cleanup_work;
-    wait_queue_head_t wait;
+    struct completion comp;
     enum rmpp_state state;
     spinlock_t lock;
     atomic_t refcount;
@@ -69,10 +69,16 @@ struct mad_rmpp_recv {
     u8 method;
 };

+static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
+{
+    if (atomic_dec_and_test(&rmpp_recv->refcount))
+        complete(&rmpp_recv->comp);
+}
+
 static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
 {
-    atomic_dec(&rmpp_recv->refcount);
-    wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount));
+    deref_rmpp_recv(rmpp_recv);
+    wait_for_completion(&rmpp_recv->comp);
     ib_destroy_ah(rmpp_recv->ah);
     kfree(rmpp_recv);
 }
@@ -253,7 +259,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
         goto error;

     rmpp_recv->agent = agent;
-    init_waitqueue_head(&rmpp_recv->wait);
+    init_completion(&rmpp_recv->comp);
     INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
     INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
     spin_lock_init(&rmpp_recv->lock);
@@ -279,12 +285,6 @@ error: kfree(rmpp_recv);
     return NULL;
 }

-static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
-{
-    if (atomic_dec_and_test(&rmpp_recv->refcount))
-        wake_up(&rmpp_recv->wait);
-}
-
 static struct mad_rmpp_recv *
 find_rmpp_recv(struct ib_mad_agent_private *agent,
            struct ib_mad_recv_wc *mad_recv_wc)
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f6a05965a4e8..9164a09b6ccd 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -32,6 +32,8 @@
  *
  * $Id: ucm.c 2594 2005-06-13 19:46:02Z libor $
  */
+
+#include <linux/completion.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/module.h>
@@ -72,7 +74,7 @@ struct ib_ucm_file {
 struct ib_ucm_context {
     int id;
-    wait_queue_head_t wait;
+    struct completion comp;
     atomic_t ref;
     int events_reported;
@@ -138,7 +140,7 @@ static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
 static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
 {
     if (atomic_dec_and_test(&ctx->ref))
-        wake_up(&ctx->wait);
+        complete(&ctx->comp);
 }

 static inline int ib_ucm_new_cm_id(int event)
@@ -178,7 +180,7 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
         return NULL;

     atomic_set(&ctx->ref, 1);
-    init_waitqueue_head(&ctx->wait);
+    init_completion(&ctx->comp);
     ctx->file = file;
     INIT_LIST_HEAD(&ctx->events);

@@ -586,8 +588,8 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
     if (IS_ERR(ctx))
         return PTR_ERR(ctx);

-    atomic_dec(&ctx->ref);
-    wait_event(ctx->wait, !atomic_read(&ctx->ref));
+    ib_ucm_ctx_put(ctx);
+    wait_for_completion(&ctx->comp);

     /* No new events will be generated after destroying the cm_id. */
     ib_destroy_cm_id(ctx->cm_id);
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index 36a32c315668..efe147dbeb42 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -211,8 +211,10 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
      */

     work = kmalloc(sizeof *work, GFP_KERNEL);
-    if (!work)
+    if (!work) {
+        mmput(mm);
         return;
+    }

     INIT_WORK(&work->work, ib_umem_account, work);
     work->mm = mm;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 398add4d4cb1..dddcdae736ac 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -116,10 +116,9 @@ static int __devinit ipath_init_one(struct pci_dev *,
 #define PCI_DEVICE_ID_INFINIPATH_PE800 0x10

 static const struct pci_device_id ipath_pci_tbl[] = {
-    {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE,
-            PCI_DEVICE_ID_INFINIPATH_HT)},
-    {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE,
-            PCI_DEVICE_ID_INFINIPATH_PE800)},
+    { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
+    { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
+    { 0, }
 };

 MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
@@ -1906,19 +1905,19 @@ static void __exit infinipath_cleanup(void)
         } else
             ipath_dbg("irq is 0, not doing free_irq "
                   "for unit %u\n", dd->ipath_unit);
-        dd->pcidev = NULL;
-    }
-    /*
-     * we check for NULL here, because it's outside the kregbase
-     * check, and we need to call it after the free_irq. Thus
-     * it's possible that the function pointers were never
-     * initialized.
-     */
-    if (dd->ipath_f_cleanup)
-        /* clean up chip-specific stuff */
-        dd->ipath_f_cleanup(dd);
+        /*
+         * we check for NULL here, because it's outside
+         * the kregbase check, and we need to call it
+         * after the free_irq. Thus it's possible that
+         * the function pointers were never initialized.
+         */
+        if (dd->ipath_f_cleanup)
+            /* clean up chip-specific stuff */
+            dd->ipath_f_cleanup(dd);
+        dd->pcidev = NULL;
+    }
     spin_lock_irqsave(&ipath_devs_lock, flags);
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index f11a900e8cd7..a2f1ceafcca9 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -505,11 +505,10 @@ static u8 flash_csum(struct ipath_flash *ifp, int adjust)
  * ipath_get_guid - get the GUID from the i2c device
  * @dd: the infinipath device
  *
- * When we add the multi-chip support, we will probably have to add
- * the ability to use the number of guids field, and get the guid from
- * the first chip's flash, to use for all of them.
+ * We have the capability to use the ipath_nguid field, and get
+ * the guid from the first chip's flash, to use for all of them.
  */
-void ipath_get_guid(struct ipath_devdata *dd)
+void ipath_get_eeprom_info(struct ipath_devdata *dd)
 {
     void *buf;
     struct ipath_flash *ifp;
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index c347191f02bf..ada267e41f6c 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -139,7 +139,7 @@ static int ipath_get_base_info(struct ipath_portdata *pd,
     kinfo->spi_piosize = dd->ipath_ibmaxlen;
     kinfo->spi_mtu = dd->ipath_ibmaxlen;    /* maxlen, not ibmtu */
     kinfo->spi_port = pd->port_port;
-    kinfo->spi_sw_version = IPATH_USER_SWVERSION;
+    kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
     kinfo->spi_hw_version = dd->ipath_revision;

     if (copy_to_user(ubase, kinfo, sizeof(*kinfo)))
@@ -1224,6 +1224,10 @@ static unsigned int ipath_poll(struct file *fp,

     if (tail == head) {
         set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
+        if(dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
+            (void)ipath_write_ureg(dd, ur_rcvhdrhead,
+                           dd->ipath_rhdrhead_intr_off
+                           | head, pd->port_port);
         poll_wait(fp, &pd->port_wait, pt);

         if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
diff --git a/drivers/infiniband/hw/ipath/ipath_ht400.c b/drivers/infiniband/hw/ipath/ipath_ht400.c
index 4652435998f3..fac0a2b74de2 100644
--- a/drivers/infiniband/hw/ipath/ipath_ht400.c
+++ b/drivers/infiniband/hw/ipath/ipath_ht400.c
@@ -607,7 +607,12 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
     case 4:     /* Ponderosa is one of the bringup boards */
         n = "Ponderosa";
         break;
-    case 5:     /* HT-460 original production board */
+    case 5:
+        /*
+         * HT-460 original production board; two production levels, with
+         * different serial number ranges. See ipath_ht_early_init() for
+         * case where we enable IPATH_GPIO_INTR for later serial # range.
+         */
         n = "InfiniPath_HT-460";
         break;
     case 6:
@@ -642,7 +647,7 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
     if (n)
         snprintf(name, namelen, "%s", n);

-    if (dd->ipath_majrev != 3 || dd->ipath_minrev != 2) {
+    if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) {
         /*
          * This version of the driver only supports the HT-400
          * Rev 3.2
@@ -1520,6 +1525,18 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
      */
     ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, INFINIPATH_S_ABORT);
+
+    ipath_get_eeprom_info(dd);
+    if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' &&
+        dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') {
+        /*
+         * Later production HT-460 has same changes as HT-465, so
+         * can use GPIO interrupts. They have serial #'s starting
+         * with 128, rather than 112.
+         */
+        dd->ipath_flags |= IPATH_GPIO_INTR;
+        dd->ipath_flags &= ~IPATH_POLL_RX_INTR;
+    }
     return 0;
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 16f640e1c16e..dc83250d26a6 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -879,7 +879,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 done:
     if (!ret) {
-        ipath_get_guid(dd);
         *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
         if (!dd->ipath_f_intrsetup(dd)) {
             /* now we can enable all interrupts from the chip */
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index e6507f8115bc..5d92d57b6f54 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -650,7 +650,7 @@ u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
 void ipath_init_pe800_funcs(struct ipath_devdata *);
 /* init HT-400-specific func */
 void ipath_init_ht400_funcs(struct ipath_devdata *);
-void ipath_get_guid(struct ipath_devdata *);
+void ipath_get_eeprom_info(struct ipath_devdata *);
 u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);

 /*
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index aa33b0e9f2f6..5ae8761f9dd2 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -136,9 +136,7 @@ int ipath_lkey_ok(struct ipath_lkey_table *rkt, struct ipath_sge *isge,
         ret = 1;
         goto bail;
     }
-    spin_lock(&rkt->lock);
     mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))];
-    spin_unlock(&rkt->lock);
     if (unlikely(mr == NULL || mr->lkey != sge->lkey)) {
         ret = 0;
         goto bail;
@@ -184,8 +182,6 @@ bail:
  * @acc: access flags
  *
  * Return 1 if successful, otherwise 0.
- *
- * The QP r_rq.lock should be held.
  */
 int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
           u32 len, u64 vaddr, u32 rkey, int acc)
@@ -196,9 +192,7 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
     size_t off;
     int ret;

-    spin_lock(&rkt->lock);
     mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
-    spin_unlock(&rkt->lock);
     if (unlikely(mr == NULL || mr->lkey != rkey)) {
         ret = 0;
         goto bail;
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index 9cb5258ffed9..9ec4ac77b87f 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -872,12 +872,13 @@ static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
             update_sge(ss, len);
             length -= len;
         }
+    /* Update address before sending packet. */
+    update_sge(ss, length);
     /* must flush early everything before trigger word */
     ipath_flush_wc();
     __raw_writel(last, piobuf);
     /* be sure trigger word is written */
     ipath_flush_wc();
-    update_sge(ss, length);
 }

 /**
@@ -943,17 +944,18 @@ int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
     if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
            !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
         u32 w;
+        u32 *addr = (u32 *) ss->sge.vaddr;
+        /* Update address before sending packet. */
+        update_sge(ss, len);
         /* Need to round up for the last dword in the packet. */
         w = (len + 3) >> 2;
-        __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
+        __iowrite32_copy(piobuf, addr, w - 1);
         /* must flush early everything before trigger word */
         ipath_flush_wc();
-        __raw_writel(((u32 *) ss->sge.vaddr)[w - 1],
-                 piobuf + w - 1);
+        __raw_writel(addr[w - 1], piobuf + w - 1);
         /* be sure trigger word is written */
         ipath_flush_wc();
-        update_sge(ss, len);
         ret = 0;
         goto bail;
     }
diff --git a/drivers/infiniband/hw/ipath/ipath_pe800.c b/drivers/infiniband/hw/ipath/ipath_pe800.c
index 6318067ab5ec..02e8c75b24f6 100644
--- a/drivers/infiniband/hw/ipath/ipath_pe800.c
+++ b/drivers/infiniband/hw/ipath/ipath_pe800.c
@@ -1180,6 +1180,8 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
      */
     dd->ipath_rhdrhead_intr_off = 1ULL<<32;
+
+    ipath_get_eeprom_info(dd);
     return 0;
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 18890716db1e..9f8855d970c8 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -375,10 +375,10 @@ static void ipath_error_qp(struct ipath_qp *qp)
     spin_lock(&dev->pending_lock);
     /* XXX What if its already removed by the timeout code? */
-    if (qp->timerwait.next != LIST_POISON1)
-        list_del(&qp->timerwait);
-    if (qp->piowait.next != LIST_POISON1)
-        list_del(&qp->piowait);
+    if (!list_empty(&qp->timerwait))
+        list_del_init(&qp->timerwait);
+    if (!list_empty(&qp->piowait))
+        list_del_init(&qp->piowait);
     spin_unlock(&dev->pending_lock);

     wc.status = IB_WC_WR_FLUSH_ERR;
@@ -427,6 +427,7 @@ static void ipath_error_qp(struct ipath_qp *qp)
 int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
             int attr_mask)
 {
+    struct ipath_ibdev *dev = to_idev(ibqp->device);
     struct ipath_qp *qp = to_iqp(ibqp);
     enum ib_qp_state cur_state, new_state;
     unsigned long flags;
@@ -443,6 +444,19 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
             attr_mask))
         goto inval;

+    if (attr_mask & IB_QP_AV)
+        if (attr->ah_attr.dlid == 0 ||
+            attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
+            goto inval;
+
+    if (attr_mask & IB_QP_PKEY_INDEX)
+        if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
+            goto inval;
+
+    if (attr_mask & IB_QP_MIN_RNR_TIMER)
+        if (attr->min_rnr_timer > 31)
+            goto inval;
+
     switch (new_state) {
     case IB_QPS_RESET:
         ipath_reset_qp(qp);
@@ -457,13 +471,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
     }

-    if (attr_mask & IB_QP_PKEY_INDEX) {
-        struct ipath_ibdev *dev = to_idev(ibqp->device);
-
-        if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
-            goto inval;
+    if (attr_mask & IB_QP_PKEY_INDEX)
         qp->s_pkey_index = attr->pkey_index;
-    }

     if (attr_mask & IB_QP_DEST_QPN)
         qp->remote_qpn = attr->dest_qp_num;
@@ -479,12 +488,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
     if (attr_mask & IB_QP_ACCESS_FLAGS)
         qp->qp_access_flags = attr->qp_access_flags;

-    if (attr_mask & IB_QP_AV) {
-        if (attr->ah_attr.dlid == 0 ||
-            attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
-            goto inval;
+    if (attr_mask & IB_QP_AV)
         qp->remote_ah_attr = attr->ah_attr;
-    }

     if (attr_mask & IB_QP_PATH_MTU)
         qp->path_mtu = attr->path_mtu;
@@ -499,11 +504,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         qp->s_rnr_retry_cnt = qp->s_rnr_retry;
     }

-    if (attr_mask & IB_QP_MIN_RNR_TIMER) {
-        if (attr->min_rnr_timer > 31)
-            goto inval;
+    if (attr_mask & IB_QP_MIN_RNR_TIMER)
         qp->s_min_rnr_timer = attr->min_rnr_timer;
-    }

     if (attr_mask & IB_QP_QKEY)
         qp->qkey = attr->qkey;
@@ -710,10 +712,8 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
                  init_attr->qp_type == IB_QPT_RC ?
                  ipath_do_rc_send : ipath_do_uc_send,
                  (unsigned long)qp);
-        qp->piowait.next = LIST_POISON1;
-        qp->piowait.prev = LIST_POISON2;
-        qp->timerwait.next = LIST_POISON1;
-        qp->timerwait.prev = LIST_POISON2;
+        INIT_LIST_HEAD(&qp->piowait);
+        INIT_LIST_HEAD(&qp->timerwait);
         qp->state = IB_QPS_RESET;
         qp->s_wq = swq;
         qp->s_size = init_attr->cap.max_send_wr + 1;
@@ -734,7 +734,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
         ipath_reset_qp(qp);

         /* Tell the core driver that the kernel SMA is present. */
-        if (qp->ibqp.qp_type == IB_QPT_SMI)
+        if (init_attr->qp_type == IB_QPT_SMI)
             ipath_layer_set_verbs_flags(dev->dd,
                             IPATH_VERBS_KERNEL_SMA);
         break;
@@ -783,10 +783,10 @@ int ipath_destroy_qp(struct ib_qp *ibqp)

     /* Make sure the QP isn't on the timeout list. */
     spin_lock_irqsave(&dev->pending_lock, flags);
-    if (qp->timerwait.next != LIST_POISON1)
-        list_del(&qp->timerwait);
-    if (qp->piowait.next != LIST_POISON1)
-        list_del(&qp->piowait);
+    if (!list_empty(&qp->timerwait))
+        list_del_init(&qp->timerwait);
+    if (!list_empty(&qp->piowait))
+        list_del_init(&qp->piowait);
     spin_unlock_irqrestore(&dev->pending_lock, flags);

     /*
@@ -855,10 +855,10 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
     spin_lock(&dev->pending_lock);
     /* XXX What if its already removed by the timeout code? */
-    if (qp->timerwait.next != LIST_POISON1)
-        list_del(&qp->timerwait);
-    if (qp->piowait.next != LIST_POISON1)
-        list_del(&qp->piowait);
+    if (!list_empty(&qp->timerwait))
+        list_del_init(&qp->timerwait);
+    if (!list_empty(&qp->piowait))
+        list_del_init(&qp->piowait);
     spin_unlock(&dev->pending_lock);

     ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index a4055ca00614..493b1821a934 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -57,7 +57,7 @@ static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
     qp->s_len = wqe->length - len;
     dev = to_idev(qp->ibqp.device);
     spin_lock(&dev->pending_lock);
-    if (qp->timerwait.next == LIST_POISON1)
+    if (list_empty(&qp->timerwait))
         list_add_tail(&qp->timerwait,
                   &dev->pending[dev->pending_index]);
     spin_unlock(&dev->pending_lock);
@@ -356,7 +356,7 @@ static inline int ipath_make_rc_req(struct ipath_qp *qp,
     if ((int)(qp->s_psn - qp->s_next_psn) > 0)
         qp->s_next_psn = qp->s_psn;
     spin_lock(&dev->pending_lock);
-    if (qp->timerwait.next == LIST_POISON1)
+    if (list_empty(&qp->timerwait))
         list_add_tail(&qp->timerwait,
                   &dev->pending[dev->pending_index]);
     spin_unlock(&dev->pending_lock);
@@ -726,8 +726,8 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
      */
     dev = to_idev(qp->ibqp.device);
     spin_lock(&dev->pending_lock);
-    if (qp->timerwait.next != LIST_POISON1)
-        list_del(&qp->timerwait);
+    if (!list_empty(&qp->timerwait))
+        list_del_init(&qp->timerwait);
     spin_unlock(&dev->pending_lock);

     if (wqe->wr.opcode == IB_WR_RDMA_READ)
@@ -886,8 +886,8 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
      * just won't find anything to restart if we ACK everything.
      */
     spin_lock(&dev->pending_lock);
-    if (qp->timerwait.next != LIST_POISON1)
-        list_del(&qp->timerwait);
+    if (!list_empty(&qp->timerwait))
+        list_del_init(&qp->timerwait);
     spin_unlock(&dev->pending_lock);

     /*
@@ -1194,8 +1194,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
                 IB_WR_RDMA_READ))
             goto ack_done;
         spin_lock(&dev->pending_lock);
-        if (qp->s_rnr_timeout == 0 &&
-            qp->timerwait.next != LIST_POISON1)
+        if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
             list_move_tail(&qp->timerwait,
                        &dev->pending[dev->pending_index]);
         spin_unlock(&dev->pending_lock);
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index eb81424b3c5b..d38f4f3cfd1d 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -435,7 +435,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
     unsigned long flags;

     spin_lock_irqsave(&dev->pending_lock, flags);
-    if (qp->piowait.next == LIST_POISON1)
+    if (list_empty(&qp->piowait))
         list_add_tail(&qp->piowait, &dev->piowait);
     spin_unlock_irqrestore(&dev->pending_lock, flags);
     /*
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index cb9e387c301f..28fdbdaa789d 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -464,7 +464,7 @@ static void ipath_ib_timer(void *arg)
     last = &dev->pending[dev->pending_index];
     while (!list_empty(last)) {
         qp = list_entry(last->next, struct ipath_qp, timerwait);
-        list_del(&qp->timerwait);
+        list_del_init(&qp->timerwait);
         qp->timer_next = resend;
         resend = qp;
         atomic_inc(&qp->refcount);
@@ -474,7 +474,7 @@ static void ipath_ib_timer(void *arg)
         qp = list_entry(last->next, struct ipath_qp, timerwait);
         if (--qp->s_rnr_timeout == 0) {
             do {
-                list_del(&qp->timerwait);
+                list_del_init(&qp->timerwait);
                 tasklet_hi_schedule(&qp->s_task);
                 if (list_empty(last))
                     break;
@@ -554,7 +554,7 @@ static int ipath_ib_piobufavail(void *arg)
     while (!list_empty(&dev->piowait)) {
         qp = list_entry(dev->piowait.next, struct ipath_qp, piowait);
-        list_del(&qp->piowait);
+        list_del_init(&qp->piowait);
         tasklet_hi_schedule(&qp->s_task);
     }
     spin_unlock_irqrestore(&dev->pending_lock, flags);
@@ -951,6 +951,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
     idev->dd = dd;

     strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
+    dev->owner = THIS_MODULE;
     dev->node_guid = ipath_layer_get_guid(dd);
     dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
     dev->uverbs_cmd_mask =
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 1985b5dfa481..798e13e14faf 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -182,7 +182,7 @@ struct mthca_cmd_context {
     u8 status;
 };

-static int fw_cmd_doorbell = 1;
+static int fw_cmd_doorbell = 0;
 module_param(fw_cmd_doorbell, int, 0644);
 MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero "
          "(and supported by FW)");
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 19765f6f8d58..07c13be07a4a 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1727,23 +1727,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
     ind = qp->rq.next_ind;

-    for (nreq = 0; wr; ++nreq, wr = wr->next) {
-        if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
-            nreq = 0;
-
-            doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
-            doorbell[1] = cpu_to_be32(qp->qpn << 8);
-
-            wmb();
-
-            mthca_write64(doorbell,
-                      dev->kar + MTHCA_RECEIVE_DOORBELL,
-                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-
-            qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
-            size0 = 0;
-        }
-
+    for (nreq = 0; wr; wr = wr->next) {
         if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
             mthca_err(dev, "RQ %06x full (%u head, %u tail,"
                     " %d max, %d nreq)\n", qp->qpn,
@@ -1797,6 +1781,23 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
         ++ind;
         if (unlikely(ind >= qp->rq.max))
             ind -= qp->rq.max;
+
+        ++nreq;
+        if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+            nreq = 0;
+
+            doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
+            doorbell[1] = cpu_to_be32(qp->qpn << 8);
+
+            wmb();
+
+            mthca_write64(doorbell,
+                      dev->kar + MTHCA_RECEIVE_DOORBELL,
+                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+            qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
+            size0 = 0;
+        }
     }

 out:
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index c32ce4348e1b..9cbdffa08dc2 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -340,7 +340,10 @@ static void srp_disconnect_target(struct srp_target_port *target)
     /* XXX should send SRP_I_LOGOUT request */

     init_completion(&target->done);
-    ib_send_cm_dreq(target->cm_id, NULL, 0);
+    if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
+        printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
+        return;
+    }
     wait_for_completion(&target->done);
 }

@@ -351,7 +354,6 @@ static void srp_remove_work(void *target_ptr)
     spin_lock_irq(target->scsi_host->host_lock);
     if (target->state != SRP_TARGET_DEAD) {
         spin_unlock_irq(target->scsi_host->host_lock);
-        scsi_host_put(target->scsi_host);
         return;
     }
     target->state = SRP_TARGET_REMOVED;
@@ -365,8 +367,6 @@ static void srp_remove_work(void *target_ptr)
     ib_destroy_cm_id(target->cm_id);
     srp_free_target_ib(target);
     scsi_host_put(target->scsi_host);
-    /* And another put to really free the target port... */
-    scsi_host_put(target->scsi_host);
 }

 static int srp_connect_target(struct srp_target_port *target)
@@ -1241,7 +1241,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
     list_for_each_entry_safe(req, tmp, &target->req_queue, list)
         if (req->scmnd->device == scmnd->device) {
             req->scmnd->result = DID_RESET << 16;
-            scmnd->scsi_done(scmnd);
+            req->scmnd->scsi_done(req->scmnd);
             srp_remove_req(target, req);
         }
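The LIST_POISON1 comparisons removed from the ipath driver above relied on list_del() poisoning the node; the replacement idiom keeps the node self-linked so list_empty() can serve as an "is it queued?" test. An illustrative stub of that idiom (qp_stub, the pending list and its lock are made up for the example, not ipath code):

#include <linux/list.h>
#include <linux/spinlock.h>

struct qp_stub {
	struct list_head timerwait;
};

static LIST_HEAD(pending);
static DEFINE_SPINLOCK(pending_lock);

static void qp_stub_init(struct qp_stub *qp)
{
	INIT_LIST_HEAD(&qp->timerwait);	/* node points at itself: "not queued" */
}

static void qp_stub_queue(struct qp_stub *qp)
{
	spin_lock(&pending_lock);
	if (list_empty(&qp->timerwait))	/* not already on the list */
		list_add_tail(&qp->timerwait, &pending);
	spin_unlock(&pending_lock);
}

static void qp_stub_dequeue(struct qp_stub *qp)
{
	spin_lock(&pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);	/* re-initializes, so the test stays valid */
	spin_unlock(&pending_lock);
}

Plain list_del() leaves the node poisoned, so it can be neither tested nor removed a second time; list_del_init() restores the self-linked state, which is what lets the hunks above drop the LIST_POISON1 checks.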