author:    Alexander Motin <mav@FreeBSD.org>  2013-10-21 12:00:26 +0000
committer: Alexander Motin <mav@FreeBSD.org>  2013-10-21 12:00:26 +0000
commit:    227d67aa5469398eb77e5eca2e525e6aae7b3a61 (patch)
tree:      2241bb08977b8fb8347e216b1dd8011b6627f617 /sys/cam/cam_xpt.c
parent:    e45e2255e88f6ce44bae133e7e59910ac4e1177d (diff)
Merge CAM locking changes from the projects/camlock branch to radically
reduce lock congestion and improve SMP scalability of the SCSI/ATA stack,
preparing the ground for the upcoming GEOM direct dispatch support.

Replace the big per-SIM locks with a bunch of smaller ones:
 - per-LUN locks to protect device and peripheral driver state;
 - per-target locks to protect the list of LUNs on a target;
 - per-bus locks to protect reference counting;
 - per-send-queue locks to protect the queue of CCBs to be sent;
 - per-done-queue locks to protect the queue of completed CCBs;
 - remaining per-SIM locks now protect only HBA driver internals.

While holding a LUN lock it is allowed (though not recommended for
performance reasons) to take the SIM lock. The opposite acquisition order
is forbidden (see the sketch below). All the other locks are leaf locks
that can be taken anywhere but should not be cascaded. Many functions,
such as xpt_action(), xpt_done(), xpt_async(), xpt_create_path(), etc.,
no longer require (but still allow) the SIM lock to be held.

To keep compatibility and to handle cases where the SIM lock can't be
dropped, all xpt_async() calls, in addition to xpt_done() calls, are
queued to completion threads for async processing in a clean environment
without the SIM lock held.

Instead of the single CAM SWI thread previously used for command
completion processing, use multiple threads (their number depending on
the number of CPUs), load-balanced using a "hash" of the device B:T:L
address.

HBA drivers that can drop the SIM lock during completion processing and
have a sufficient number of completion threads to scale efficiently to
multiple CPUs can use the new function xpt_done_direct() to avoid an
extra context switch. Make the ahci(4) driver use this mechanism
depending on the hardware setup.

Sponsored by:   iXsystems, Inc.
MFC after:      2 months
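
The lock-ordering rule above (a LUN lock may be held while taking the SIM
lock, never the reverse) is the backbone of the new scheme. The following
is a minimal userland sketch of that hierarchy using POSIX mutexes;
lun_mtx and sim_mtx are invented names for illustration, not FreeBSD
kernel code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lun_mtx = PTHREAD_MUTEX_INITIALIZER; /* per-LUN lock */
static pthread_mutex_t sim_mtx = PTHREAD_MUTEX_INITIALIZER; /* per-SIM lock */

/* Allowed: LUN lock first, then (if unavoidable) the SIM lock. */
static void
allowed_order(void)
{
        pthread_mutex_lock(&lun_mtx);
        pthread_mutex_lock(&sim_mtx);   /* legal, but discouraged for speed */
        pthread_mutex_unlock(&sim_mtx);
        pthread_mutex_unlock(&lun_mtx);
}

/*
 * Forbidden: SIM lock first, then LUN lock; two threads running the two
 * orders against each other can deadlock.
 */
static void
forbidden_order(void)
{
        pthread_mutex_lock(&sim_mtx);
        /* pthread_mutex_lock(&lun_mtx); would invert the hierarchy */
        pthread_mutex_unlock(&sim_mtx);
}

int
main(void)
{
        allowed_order();
        forbidden_order();
        printf("lock ordering demo done\n");
        return (0);
}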
Notes: svn path=/head/; revision=256843
Diffstat (limited to 'sys/cam/cam_xpt.c')
-rw-r--r--  sys/cam/cam_xpt.c | 1689 +++++++++++++++++++++++++++++---------------
1 file changed, 951 insertions(+), 738 deletions(-)
diff --git a/sys/cam/cam_xpt.c b/sys/cam/cam_xpt.c
index 76ccb62a9a75..8d635e881070 100644
--- a/sys/cam/cam_xpt.c
+++ b/sys/cam/cam_xpt.c
@@ -40,7 +40,9 @@ __FBSDID("$FreeBSD$");
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
+#include <sys/proc.h>
#include <sys/sbuf.h>
+#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
@@ -117,6 +119,7 @@ struct xpt_softc {
struct mtx xpt_topo_lock;
struct mtx xpt_lock;
+ struct taskqueue *xpt_taskq;
};
typedef enum {
@@ -155,14 +158,19 @@ TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
&xsoftc.boot_delay, 0, "Bus registration wait time");
-/* Queues for our software interrupt handler */
-typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
-typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
-static cam_simq_t cam_simq;
-static struct mtx cam_simq_lock;
+struct cam_doneq {
+ struct mtx_padalign cam_doneq_mtx;
+ STAILQ_HEAD(, ccb_hdr) cam_doneq;
+ int cam_doneq_sleep;
+};
+
+static struct cam_doneq cam_doneqs[MAXCPU];
+static int cam_num_doneqs;
+static struct proc *cam_proc;
-/* Pointers to software interrupt handlers */
-static void *cambio_ih;
+TUNABLE_INT("kern.cam.num_doneqs", &cam_num_doneqs);
+SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
+ &cam_num_doneqs, 0, "Number of completion queues/threads");
struct cam_periph *xpt_periph;
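
struct cam_doneq above pairs a cache-line-padded mutex (struct
mtx_padalign) with a STAILQ of completed CCBs, so neighboring entries of
the cam_doneqs[] array never share a cache line and the queue locks do
not ping-pong between CPUs. A rough userland analogue, assuming a 64-byte
cache line and using alignas() in place of mtx_padalign (all names
invented):

#include <stdalign.h>
#include <stdio.h>
#include <pthread.h>
#include <sys/queue.h>

struct ccb_hdr_model {                  /* stand-in for struct ccb_hdr */
        STAILQ_ENTRY(ccb_hdr_model) link;
};

struct doneq_model {
        /* Aligning the first member to 64 bytes makes every array element
         * start on its own cache line, approximating mtx_padalign. */
        alignas(64) pthread_mutex_t mtx;
        STAILQ_HEAD(, ccb_hdr_model) q; /* completed CCBs */
        int sleeping;                   /* consumer parked on the queue */
};

static struct doneq_model doneqs[8];

int
main(void)
{
        printf("element size %zu, multiple of 64: %d\n",
            sizeof(doneqs[0]), sizeof(doneqs[0]) % 64 == 0);
        return (0);
}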
@@ -223,16 +231,20 @@ static void xpt_async_bcast(struct async_list *async_head,
void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
-static union ccb *xpt_get_ccb(struct cam_ed *device);
-static void xpt_run_dev_allocq(struct cam_ed *device);
+static union ccb *xpt_get_ccb(struct cam_periph *periph);
+static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
+static void xpt_run_allocq(struct cam_periph *periph, int sleep);
+static void xpt_run_allocq_task(void *context, int pending);
static void xpt_run_devq(struct cam_devq *devq);
static timeout_t xpt_release_devq_timeout;
static void xpt_release_simq_timeout(void *arg) __unused;
+static void xpt_acquire_bus(struct cam_eb *bus);
static void xpt_release_bus(struct cam_eb *bus);
-static void xpt_release_devq_device(struct cam_ed *dev, u_int count,
+static int xpt_release_devq_device(struct cam_ed *dev, u_int count,
int run_queue);
static struct cam_et*
xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
+static void xpt_acquire_target(struct cam_et *target);
static void xpt_release_target(struct cam_et *target);
static struct cam_eb*
xpt_find_bus(path_id_t path_id);
@@ -241,11 +253,14 @@ static struct cam_et*
static struct cam_ed*
xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void xpt_config(void *arg);
+static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
+ u_int32_t new_priority);
static xpt_devicefunc_t xptpassannouncefunc;
static void xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void xptpoll(struct cam_sim *sim);
-static void camisr(void *);
-static void camisr_runqueue(struct cam_sim *);
+static void camisr_runqueue(void);
+static void xpt_done_process(struct ccb_hdr *ccb_h);
+static void xpt_done_td(void *);
static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns,
u_int num_patterns, struct cam_eb *bus);
static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns,
@@ -296,7 +311,6 @@ static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t xptsetasyncbusfunc;
static cam_status xptregister(struct cam_periph *periph,
void *arg);
-static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_queued(struct cam_ed *device);
static __inline int
@@ -304,6 +318,7 @@ xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
{
int retval;
+ mtx_assert(&devq->send_mtx, MA_OWNED);
if ((dev->ccbq.queue.entries > 0) &&
(dev->ccbq.dev_openings > 0) &&
(dev->ccbq.queue.qfrozen_cnt == 0)) {
@@ -314,7 +329,7 @@ xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
*/
retval =
xpt_schedule_dev(&devq->send_queue,
- &dev->devq_entry.pinfo,
+ &dev->devq_entry,
CAMQ_GET_PRIO(&dev->ccbq.queue));
} else {
retval = 0;
@@ -323,15 +338,9 @@ xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
}
static __inline int
-periph_is_queued(struct cam_periph *periph)
-{
- return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
-}
-
-static __inline int
device_is_queued(struct cam_ed *device)
{
- return (device->devq_entry.pinfo.index != CAM_UNQUEUED_INDEX);
+ return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
}
static void
@@ -340,13 +349,6 @@ xpt_periph_init()
make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}
-static void
-xptdone(struct cam_periph *periph, union ccb *done_ccb)
-{
- /* Caller will release the CCB */
- wakeup(&done_ccb->ccb_h.cbfcnp);
-}
-
static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
@@ -457,8 +459,6 @@ xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *
ccb = xpt_alloc_ccb();
- CAM_SIM_LOCK(bus->sim);
-
/*
* Create a path using the bus, target, and lun the
* user passed in.
@@ -469,7 +469,6 @@ xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *
inccb->ccb_h.target_lun) !=
CAM_REQ_CMP){
error = EINVAL;
- CAM_SIM_UNLOCK(bus->sim);
xpt_free_ccb(ccb);
break;
}
@@ -477,12 +476,12 @@ xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *
xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
inccb->ccb_h.pinfo.priority);
xpt_merge_ccb(ccb, inccb);
- ccb->ccb_h.cbfcnp = xptdone;
+ xpt_path_lock(ccb->ccb_h.path);
cam_periph_runccb(ccb, NULL, 0, 0, NULL);
+ xpt_path_unlock(ccb->ccb_h.path);
bcopy(ccb, inccb, sizeof(union ccb));
xpt_free_path(ccb->ccb_h.path);
xpt_free_ccb(ccb);
- CAM_SIM_UNLOCK(bus->sim);
break;
case XPT_DEBUG: {
@@ -493,8 +492,6 @@ xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *
* allocate it on the stack.
*/
- CAM_SIM_LOCK(bus->sim);
-
/*
* Create a path using the bus, target, and lun the
* user passed in.
@@ -505,18 +502,15 @@ xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *
inccb->ccb_h.target_lun) !=
CAM_REQ_CMP){
error = EINVAL;
- CAM_SIM_UNLOCK(bus->sim);
break;
}
/* Ensure all of our fields are correct */
xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
inccb->ccb_h.pinfo.priority);
xpt_merge_ccb(&ccb, inccb);
- ccb.ccb_h.cbfcnp = xptdone;
xpt_action(&ccb);
bcopy(&ccb, inccb, sizeof(union ccb));
xpt_free_path(ccb.ccb_h.path);
- CAM_SIM_UNLOCK(bus->sim);
break;
}
@@ -564,9 +558,7 @@ xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *
/*
* This is an immediate CCB, we can send it on directly.
*/
- CAM_SIM_LOCK(xpt_path_sim(xpt_periph->path));
xpt_action(inccb);
- CAM_SIM_UNLOCK(xpt_path_sim(xpt_periph->path));
/*
* Map the buffers back into user space.
@@ -784,7 +776,7 @@ static void
xpt_scanner_thread(void *dummy)
{
union ccb *ccb;
- struct cam_sim *sim;
+ struct cam_path path;
xpt_lock_buses();
for (;;) {
@@ -795,10 +787,16 @@ xpt_scanner_thread(void *dummy)
TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
xpt_unlock_buses();
- sim = ccb->ccb_h.path->bus->sim;
- CAM_SIM_LOCK(sim);
+ /*
+ * Since lock can be dropped inside and path freed
+ * by completion callback even before return here,
+ * take our own path copy for reference.
+ */
+ xpt_copy_path(&path, ccb->ccb_h.path);
+ xpt_path_lock(&path);
xpt_action(ccb);
- CAM_SIM_UNLOCK(sim);
+ xpt_path_unlock(&path);
+ xpt_release_path(&path);
xpt_lock_buses();
}
@@ -857,16 +855,17 @@ xpt_init(void *dummy)
struct cam_path *path;
struct cam_devq *devq;
cam_status status;
+ int error, i;
TAILQ_INIT(&xsoftc.xpt_busses);
- TAILQ_INIT(&cam_simq);
TAILQ_INIT(&xsoftc.ccb_scanq);
STAILQ_INIT(&xsoftc.highpowerq);
xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
- mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
+ xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
+ taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);
#ifdef CAM_BOOT_DELAY
/*
@@ -920,8 +919,26 @@ xpt_init(void *dummy)
path, NULL, 0, xpt_sim);
xpt_free_path(path);
mtx_unlock(&xsoftc.xpt_lock);
- /* Install our software interrupt handlers */
- swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
+ if (cam_num_doneqs < 1)
+ cam_num_doneqs = 1 + mp_ncpus / 6;
+ else if (cam_num_doneqs > MAXCPU)
+ cam_num_doneqs = MAXCPU;
+ for (i = 0; i < cam_num_doneqs; i++) {
+ mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
+ MTX_DEF);
+ STAILQ_INIT(&cam_doneqs[i].cam_doneq);
+ error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
+ &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
+ if (error != 0) {
+ cam_num_doneqs = i;
+ break;
+ }
+ }
+ if (cam_num_doneqs < 1) {
+ printf("xpt_init: Cannot init completion queues "
+ "- failing attach\n");
+ return (ENOMEM);
+ }
/*
* Register a callback for when interrupts are enabled.
*/
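
The sizing policy just above defaults to one completion thread plus one
more per six CPUs, and clamps an explicitly set kern.cam.num_doneqs
tunable to MAXCPU. A standalone sketch of only that computation (the
MAXCPU value here is assumed for the demo):

#include <stdio.h>

#define MAXCPU 256                      /* assumed for this demo */

static int
num_doneqs(int tunable, int ncpus)
{
        int n = tunable;

        if (n < 1)                      /* unset: scale with CPU count */
                n = 1 + ncpus / 6;
        else if (n > MAXCPU)            /* explicit tunable: clamp */
                n = MAXCPU;
        return (n);
}

int
main(void)
{
        int cpus[] = { 1, 4, 8, 24, 64 };

        for (int i = 0; i < (int)(sizeof(cpus) / sizeof(cpus[0])); i++)
                printf("%d CPUs -> %d completion queue(s)\n",
                    cpus[i], num_doneqs(0, cpus[i]));
        return (0);
}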
@@ -966,28 +983,15 @@ xpt_add_periph(struct cam_periph *periph)
{
struct cam_ed *device;
int32_t status;
- struct periph_list *periph_head;
-
- mtx_assert(periph->sim->mtx, MA_OWNED);
+ TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
device = periph->path->device;
-
- periph_head = &device->periphs;
-
status = CAM_REQ_CMP;
-
if (device != NULL) {
- /*
- * Make room for this peripheral
- * so it will fit in the queue
- * when it's scheduled to run
- */
- status = camq_resize(&device->drvq,
- device->drvq.array_size + 1);
-
+ mtx_lock(&device->target->bus->eb_mtx);
device->generation++;
-
- SLIST_INSERT_HEAD(periph_head, periph, periph_links);
+ SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
+ mtx_unlock(&device->target->bus->eb_mtx);
}
return (status);
@@ -998,21 +1002,12 @@ xpt_remove_periph(struct cam_periph *periph)
{
struct cam_ed *device;
- mtx_assert(periph->sim->mtx, MA_OWNED);
-
device = periph->path->device;
-
if (device != NULL) {
- struct periph_list *periph_head;
-
- periph_head = &device->periphs;
-
- /* Release the slot for this peripheral */
- camq_resize(&device->drvq, device->drvq.array_size - 1);
-
+ mtx_lock(&device->target->bus->eb_mtx);
device->generation++;
-
- SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
+ SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
+ mtx_unlock(&device->target->bus->eb_mtx);
}
}
@@ -1022,7 +1017,7 @@ xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
struct cam_path *path = periph->path;
- mtx_assert(periph->sim->mtx, MA_OWNED);
+ cam_periph_assert(periph, MA_OWNED);
periph->flags |= CAM_PERIPH_ANNOUNCED;
printf("%s%d at %s%d bus %d scbus%d target %d lun %d\n",
@@ -1077,7 +1072,7 @@ xpt_denounce_periph(struct cam_periph *periph)
{
struct cam_path *path = periph->path;
- mtx_assert(periph->sim->mtx, MA_OWNED);
+ cam_periph_assert(periph, MA_OWNED);
printf("%s%d at %s%d bus %d scbus%d target %d lun %d\n",
periph->periph_name, periph->unit_number,
path->bus->sim->sim_name,
@@ -1110,7 +1105,7 @@ xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
struct ccb_dev_advinfo cdai;
struct scsi_vpd_id_descriptor *idd;
- mtx_assert(path->bus->sim->mtx, MA_OWNED);
+ xpt_path_assert(path, MA_OWNED);
memset(&cdai, 0, sizeof(cdai));
xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
@@ -1531,6 +1526,7 @@ static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
struct ccb_dev_match *cdm;
+ struct cam_et *target;
dev_match_ret retval;
cdm = (struct ccb_dev_match *)arg;
@@ -1602,71 +1598,72 @@ xptedtbusfunc(struct cam_eb *bus, void *arg)
* If there is a target generation recorded, check it to
* make sure the target list hasn't changed.
*/
- if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
- && (bus == cdm->pos.cookie.bus)
- && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
- && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
- && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
- bus->generation)) {
- cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
- return(0);
- }
-
+ mtx_lock(&bus->eb_mtx);
if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
&& (cdm->pos.cookie.bus == bus)
&& (cdm->pos.position_type & CAM_DEV_POS_TARGET)
- && (cdm->pos.cookie.target != NULL))
- return(xpttargettraverse(bus,
- (struct cam_et *)cdm->pos.cookie.target,
- xptedttargetfunc, arg));
- else
- return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
+ && (cdm->pos.cookie.target != NULL)) {
+ if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
+ bus->generation)) {
+ mtx_unlock(&bus->eb_mtx);
+ cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
+ return (0);
+ }
+ target = (struct cam_et *)cdm->pos.cookie.target;
+ target->refcount++;
+ } else
+ target = NULL;
+ mtx_unlock(&bus->eb_mtx);
+
+ return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
}
static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
struct ccb_dev_match *cdm;
+ struct cam_eb *bus;
+ struct cam_ed *device;
cdm = (struct ccb_dev_match *)arg;
+ bus = target->bus;
/*
* If there is a device list generation recorded, check it to
* make sure the device list hasn't changed.
*/
+ mtx_lock(&bus->eb_mtx);
if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
- && (cdm->pos.cookie.bus == target->bus)
+ && (cdm->pos.cookie.bus == bus)
&& (cdm->pos.position_type & CAM_DEV_POS_TARGET)
&& (cdm->pos.cookie.target == target)
&& (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
- && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
- && (cdm->pos.generations[CAM_DEV_GENERATION] !=
- target->generation)) {
- cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
- return(0);
- }
+ && (cdm->pos.cookie.device != NULL)) {
+ if (cdm->pos.generations[CAM_DEV_GENERATION] !=
+ target->generation) {
+ mtx_unlock(&bus->eb_mtx);
+ cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
+ return(0);
+ }
+ device = (struct cam_ed *)cdm->pos.cookie.device;
+ device->refcount++;
+ } else
+ device = NULL;
+ mtx_unlock(&bus->eb_mtx);
- if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
- && (cdm->pos.cookie.bus == target->bus)
- && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
- && (cdm->pos.cookie.target == target)
- && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
- && (cdm->pos.cookie.device != NULL))
- return(xptdevicetraverse(target,
- (struct cam_ed *)cdm->pos.cookie.device,
- xptedtdevicefunc, arg));
- else
- return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
+ return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
}
static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{
-
+ struct cam_eb *bus;
+ struct cam_periph *periph;
struct ccb_dev_match *cdm;
dev_match_ret retval;
cdm = (struct ccb_dev_match *)arg;
+ bus = device->target->bus;
/*
* If our position is for something deeper in the tree, that means
@@ -1756,33 +1753,31 @@ xptedtdevicefunc(struct cam_ed *device, void *arg)
* If there is a peripheral list generation recorded, make sure
* it hasn't changed.
*/
+ xpt_lock_buses();
+ mtx_lock(&bus->eb_mtx);
if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
- && (device->target->bus == cdm->pos.cookie.bus)
- && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
- && (device->target == cdm->pos.cookie.target)
- && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
- && (device == cdm->pos.cookie.device)
- && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
- && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
- && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
- device->generation)){
- cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
- return(0);
- }
-
- if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
- && (cdm->pos.cookie.bus == device->target->bus)
+ && (cdm->pos.cookie.bus == bus)
&& (cdm->pos.position_type & CAM_DEV_POS_TARGET)
&& (cdm->pos.cookie.target == device->target)
&& (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
&& (cdm->pos.cookie.device == device)
&& (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
- && (cdm->pos.cookie.periph != NULL))
- return(xptperiphtraverse(device,
- (struct cam_periph *)cdm->pos.cookie.periph,
- xptedtperiphfunc, arg));
- else
- return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
+ && (cdm->pos.cookie.periph != NULL)) {
+ if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
+ device->generation) {
+ mtx_unlock(&bus->eb_mtx);
+ xpt_unlock_buses();
+ cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
+ return(0);
+ }
+ periph = (struct cam_periph *)cdm->pos.cookie.periph;
+ periph->refcount++;
+ } else
+ periph = NULL;
+ mtx_unlock(&bus->eb_mtx);
+ xpt_unlock_buses();
+
+ return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
}
static int
@@ -1858,6 +1853,7 @@ xptedtperiphfunc(struct cam_periph *periph, void *arg)
static int
xptedtmatch(struct ccb_dev_match *cdm)
{
+ struct cam_eb *bus;
int ret;
cdm->num_matches = 0;
@@ -1866,19 +1862,22 @@ xptedtmatch(struct ccb_dev_match *cdm)
* Check the bus list generation. If it has changed, the user
* needs to reset everything and start over.
*/
+ xpt_lock_buses();
if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
- && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
- && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
- cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
- return(0);
- }
+ && (cdm->pos.cookie.bus != NULL)) {
+ if (cdm->pos.generations[CAM_BUS_GENERATION] !=
+ xsoftc.bus_generation) {
+ xpt_unlock_buses();
+ cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
+ return(0);
+ }
+ bus = (struct cam_eb *)cdm->pos.cookie.bus;
+ bus->refcount++;
+ } else
+ bus = NULL;
+ xpt_unlock_buses();
- if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
- && (cdm->pos.cookie.bus != NULL))
- ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
- xptedtbusfunc, cdm);
- else
- ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
+ ret = xptbustraverse(bus, xptedtbusfunc, cdm);
/*
* If we get back 0, that means that we had to stop before fully
@@ -1895,29 +1894,29 @@ xptedtmatch(struct ccb_dev_match *cdm)
static int
xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
{
+ struct cam_periph *periph;
struct ccb_dev_match *cdm;
cdm = (struct ccb_dev_match *)arg;
+ xpt_lock_buses();
if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
&& (cdm->pos.cookie.pdrv == pdrv)
&& (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
- && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
- && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
- (*pdrv)->generation)) {
- cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
- return(0);
- }
+ && (cdm->pos.cookie.periph != NULL)) {
+ if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
+ (*pdrv)->generation) {
+ xpt_unlock_buses();
+ cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
+ return(0);
+ }
+ periph = (struct cam_periph *)cdm->pos.cookie.periph;
+ periph->refcount++;
+ } else
+ periph = NULL;
+ xpt_unlock_buses();
- if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
- && (cdm->pos.cookie.pdrv == pdrv)
- && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
- && (cdm->pos.cookie.periph != NULL))
- return(xptpdperiphtraverse(pdrv,
- (struct cam_periph *)cdm->pos.cookie.periph,
- xptplistperiphfunc, arg));
- else
- return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
+ return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
}
static int
@@ -2066,35 +2065,31 @@ xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
int retval;
retval = 1;
-
- xpt_lock_buses();
- for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
- bus != NULL;
- bus = next_bus) {
-
+ if (start_bus)
+ bus = start_bus;
+ else {
+ xpt_lock_buses();
+ bus = TAILQ_FIRST(&xsoftc.xpt_busses);
+ if (bus == NULL) {
+ xpt_unlock_buses();
+ return (retval);
+ }
bus->refcount++;
-
- /*
- * XXX The locking here is obviously very complex. We
- * should work to simplify it.
- */
xpt_unlock_buses();
- CAM_SIM_LOCK(bus->sim);
+ }
+ for (; bus != NULL; bus = next_bus) {
retval = tr_func(bus, arg);
- CAM_SIM_UNLOCK(bus->sim);
-
+ if (retval == 0) {
+ xpt_release_bus(bus);
+ break;
+ }
xpt_lock_buses();
next_bus = TAILQ_NEXT(bus, links);
+ if (next_bus)
+ next_bus->refcount++;
xpt_unlock_buses();
-
xpt_release_bus(bus);
-
- if (retval == 0)
- return(retval);
- xpt_lock_buses();
}
- xpt_unlock_buses();
-
return(retval);
}
@@ -2105,24 +2100,32 @@ xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
struct cam_et *target, *next_target;
int retval;
- mtx_assert(bus->sim->mtx, MA_OWNED);
retval = 1;
- for (target = (start_target ? start_target :
- TAILQ_FIRST(&bus->et_entries));
- target != NULL; target = next_target) {
-
+ if (start_target)
+ target = start_target;
+ else {
+ mtx_lock(&bus->eb_mtx);
+ target = TAILQ_FIRST(&bus->et_entries);
+ if (target == NULL) {
+ mtx_unlock(&bus->eb_mtx);
+ return (retval);
+ }
target->refcount++;
-
+ mtx_unlock(&bus->eb_mtx);
+ }
+ for (; target != NULL; target = next_target) {
retval = tr_func(target, arg);
-
+ if (retval == 0) {
+ xpt_release_target(target);
+ break;
+ }
+ mtx_lock(&bus->eb_mtx);
next_target = TAILQ_NEXT(target, links);
-
+ if (next_target)
+ next_target->refcount++;
+ mtx_unlock(&bus->eb_mtx);
xpt_release_target(target);
-
- if (retval == 0)
- return(retval);
}
-
return(retval);
}
@@ -2130,36 +2133,39 @@ static int
xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
xpt_devicefunc_t *tr_func, void *arg)
{
+ struct cam_eb *bus;
struct cam_ed *device, *next_device;
int retval;
- mtx_assert(target->bus->sim->mtx, MA_OWNED);
retval = 1;
- for (device = (start_device ? start_device :
- TAILQ_FIRST(&target->ed_entries));
- device != NULL;
- device = next_device) {
-
- /*
- * Hold a reference so the current device does not go away
- * on us.
- */
+ bus = target->bus;
+ if (start_device)
+ device = start_device;
+ else {
+ mtx_lock(&bus->eb_mtx);
+ device = TAILQ_FIRST(&target->ed_entries);
+ if (device == NULL) {
+ mtx_unlock(&bus->eb_mtx);
+ return (retval);
+ }
device->refcount++;
-
+ mtx_unlock(&bus->eb_mtx);
+ }
+ for (; device != NULL; device = next_device) {
+ mtx_lock(&device->device_mtx);
retval = tr_func(device, arg);
-
- /*
- * Grab our next pointer before we release the current
- * device.
- */
+ mtx_unlock(&device->device_mtx);
+ if (retval == 0) {
+ xpt_release_device(device);
+ break;
+ }
+ mtx_lock(&bus->eb_mtx);
next_device = TAILQ_NEXT(device, links);
-
+ if (next_device)
+ next_device->refcount++;
+ mtx_unlock(&bus->eb_mtx);
xpt_release_device(device);
-
- if (retval == 0)
- return(retval);
}
-
return(retval);
}
@@ -2167,56 +2173,48 @@ static int
xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
xpt_periphfunc_t *tr_func, void *arg)
{
+ struct cam_eb *bus;
struct cam_periph *periph, *next_periph;
int retval;
retval = 1;
- mtx_assert(device->sim->mtx, MA_OWNED);
- xpt_lock_buses();
- for (periph = (start_periph ? start_periph :
- SLIST_FIRST(&device->periphs));
- periph != NULL;
- periph = next_periph) {
-
-
- /*
- * In this case, we want to show peripherals that have been
- * invalidated, but not peripherals that are scheduled to
- * be freed. So instead of calling cam_periph_acquire(),
- * which will fail if the periph has been invalidated, we
- * just check for the free flag here. If it is in the
- * process of being freed, we skip to the next periph.
- */
- if (periph->flags & CAM_PERIPH_FREE) {
- next_periph = SLIST_NEXT(periph, periph_links);
- continue;
+ bus = device->target->bus;
+ if (start_periph)
+ periph = start_periph;
+ else {
+ xpt_lock_buses();
+ mtx_lock(&bus->eb_mtx);
+ periph = SLIST_FIRST(&device->periphs);
+ while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
+ periph = SLIST_NEXT(periph, periph_links);
+ if (periph == NULL) {
+ mtx_unlock(&bus->eb_mtx);
+ xpt_unlock_buses();
+ return (retval);
}
-
- /*
- * Acquire a reference to this periph while we call the
- * traversal function, so it can't go away.
- */
periph->refcount++;
-
+ mtx_unlock(&bus->eb_mtx);
+ xpt_unlock_buses();
+ }
+ for (; periph != NULL; periph = next_periph) {
retval = tr_func(periph, arg);
-
- /*
- * Grab the next peripheral before we release this one, so
- * our next pointer is still valid.
- */
+ if (retval == 0) {
+ cam_periph_release(periph);
+ break;
+ }
+ xpt_lock_buses();
+ mtx_lock(&bus->eb_mtx);
next_periph = SLIST_NEXT(periph, periph_links);
-
- cam_periph_release_locked_buses(periph);
-
- if (retval == 0)
- goto bailout_done;
+ while (next_periph != NULL &&
+ (next_periph->flags & CAM_PERIPH_FREE) != 0)
+ next_periph = SLIST_NEXT(periph, periph_links);
+ if (next_periph)
+ next_periph->refcount++;
+ mtx_unlock(&bus->eb_mtx);
+ xpt_unlock_buses();
+ cam_periph_release_locked(periph);
}
-
-bailout_done:
-
- xpt_unlock_buses();
-
return(retval);
}
@@ -2254,57 +2252,42 @@ xptpdperiphtraverse(struct periph_driver **pdrv,
xpt_periphfunc_t *tr_func, void *arg)
{
struct cam_periph *periph, *next_periph;
- struct cam_sim *sim;
int retval;
retval = 1;
- xpt_lock_buses();
- for (periph = (start_periph ? start_periph :
- TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
- periph = next_periph) {
-
-
- /*
- * In this case, we want to show peripherals that have been
- * invalidated, but not peripherals that are scheduled to
- * be freed. So instead of calling cam_periph_acquire(),
- * which will fail if the periph has been invalidated, we
- * just check for the free flag here. If it is free, we
- * skip to the next periph.
- */
- if (periph->flags & CAM_PERIPH_FREE) {
- next_periph = TAILQ_NEXT(periph, unit_links);
- continue;
+ if (start_periph)
+ periph = start_periph;
+ else {
+ xpt_lock_buses();
+ periph = TAILQ_FIRST(&(*pdrv)->units);
+ while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
+ periph = TAILQ_NEXT(periph, unit_links);
+ if (periph == NULL) {
+ xpt_unlock_buses();
+ return (retval);
}
-
- /*
- * Acquire a reference to this periph while we call the
- * traversal function, so it can't go away.
- */
periph->refcount++;
- sim = periph->sim;
xpt_unlock_buses();
- CAM_SIM_LOCK(sim);
- xpt_lock_buses();
+ }
+ for (; periph != NULL; periph = next_periph) {
+ cam_periph_lock(periph);
retval = tr_func(periph, arg);
-
- /*
- * Grab the next peripheral before we release this one, so
- * our next pointer is still valid.
- */
+ cam_periph_unlock(periph);
+ if (retval == 0) {
+ cam_periph_release(periph);
+ break;
+ }
+ xpt_lock_buses();
next_periph = TAILQ_NEXT(periph, unit_links);
-
- cam_periph_release_locked_buses(periph);
- CAM_SIM_UNLOCK(sim);
-
- if (retval == 0)
- goto bailout_done;
+ while (next_periph != NULL &&
+ (next_periph->flags & CAM_PERIPH_FREE) != 0)
+ next_periph = TAILQ_NEXT(periph, unit_links);
+ if (next_periph)
+ next_periph->refcount++;
+ xpt_unlock_buses();
+ cam_periph_release(periph);
}
-bailout_done:
-
- xpt_unlock_buses();
-
return(retval);
}
@@ -2450,12 +2433,14 @@ xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
bus->path_id,
CAM_TARGET_WILDCARD,
CAM_LUN_WILDCARD);
+ xpt_path_lock(&path);
xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
cpi.ccb_h.func_code = XPT_PATH_INQ;
xpt_action((union ccb *)&cpi);
csa->callback(csa->callback_arg,
AC_PATH_REGISTERED,
&path, &cpi);
+ xpt_path_unlock(&path);
xpt_release_path(&path);
return(1);
@@ -2475,6 +2460,8 @@ void
xpt_action_default(union ccb *start_ccb)
{
struct cam_path *path;
+ struct cam_sim *sim;
+ int lock;
path = start_ccb->ccb_h.path;
CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n"));
@@ -2523,14 +2510,18 @@ xpt_action_default(union ccb *start_ccb)
case XPT_RESET_DEV:
case XPT_ENG_EXEC:
case XPT_SMP_IO:
+ {
+ struct cam_devq *devq;
+
+ devq = path->bus->sim->devq;
+ mtx_lock(&devq->send_mtx);
cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
- if (xpt_schedule_devq(path->bus->sim->devq, path->device))
- xpt_run_devq(path->bus->sim->devq);
+ if (xpt_schedule_devq(devq, path->device) != 0)
+ xpt_run_devq(devq);
+ mtx_unlock(&devq->send_mtx);
break;
+ }
case XPT_CALC_GEOMETRY:
- {
- struct cam_sim *sim;
-
/* Filter out garbage */
if (start_ccb->ccg.block_size == 0
|| start_ccb->ccg.volume_size == 0) {
@@ -2558,10 +2549,7 @@ xpt_action_default(union ccb *start_ccb)
break;
}
#endif
- sim = path->bus->sim;
- (*(sim->sim_action))(sim, start_ccb);
- break;
- }
+ goto call_sim;
case XPT_ABORT:
{
union ccb* abort_ccb;
@@ -2622,21 +2610,18 @@ xpt_action_default(union ccb *start_ccb)
case XPT_NOTIFY_ACKNOWLEDGE:
case XPT_GET_SIM_KNOB:
case XPT_SET_SIM_KNOB:
- {
- struct cam_sim *sim;
-
- sim = path->bus->sim;
- (*(sim->sim_action))(sim, start_ccb);
- break;
- }
+ case XPT_GET_TRAN_SETTINGS:
+ case XPT_SET_TRAN_SETTINGS:
case XPT_PATH_INQ:
- {
- struct cam_sim *sim;
-
+call_sim:
sim = path->bus->sim;
+ lock = (mtx_owned(sim->mtx) == 0);
+ if (lock)
+ CAM_SIM_LOCK(sim);
(*(sim->sim_action))(sim, start_ccb);
+ if (lock)
+ CAM_SIM_UNLOCK(sim);
break;
- }
case XPT_PATH_STATS:
start_ccb->cpis.last_reset = path->bus->last_reset;
start_ccb->ccb_h.status = CAM_REQ_CMP;
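
The call_sim: label above introduces the compatibility pattern this
commit uses throughout: take the SIM lock only when the caller does not
already hold it. The sketch below models mtx_owned() with an
owner-tracking wrapper around a POSIX mutex, since pthread mutexes do not
expose their owner the way FreeBSD mutexes do; every name here is
invented for illustration.

#include <pthread.h>
#include <stdio.h>

struct owned_mtx {
        pthread_mutex_t mtx;
        pthread_t       owner;
        int             locked;
};

/* Only meaningful as "do I hold it?": a non-owner may read stale fields,
 * but then the equality test fails, which is the right answer anyway. */
static int
mtx_owned_model(struct owned_mtx *m)
{
        return (m->locked && pthread_equal(m->owner, pthread_self()));
}

static void
mtx_lock_model(struct owned_mtx *m)
{
        pthread_mutex_lock(&m->mtx);
        m->owner = pthread_self();
        m->locked = 1;
}

static void
mtx_unlock_model(struct owned_mtx *m)
{
        m->locked = 0;
        pthread_mutex_unlock(&m->mtx);
}

static struct owned_mtx sim_mtx = { PTHREAD_MUTEX_INITIALIZER };

/* The call_sim idiom: lock only if not already held by this thread. */
static void
call_sim_model(void (*action)(void))
{
        int lock = (mtx_owned_model(&sim_mtx) == 0);

        if (lock)
                mtx_lock_model(&sim_mtx);
        action();
        if (lock)
                mtx_unlock_model(&sim_mtx);
}

static void
dummy_action(void)
{
        printf("sim action ran\n");
}

int
main(void)
{
        call_sim_model(dummy_action);   /* lock taken and dropped here */
        mtx_lock_model(&sim_mtx);
        call_sim_model(dummy_action);   /* caller already holds the lock */
        mtx_unlock_model(&sim_mtx);
        return (0);
}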
@@ -2798,11 +2783,6 @@ xpt_action_default(union ccb *start_ccb)
position_type = CAM_DEV_POS_PDRV;
}
- /*
- * Note that we drop the SIM lock here, because the EDT
- * traversal code needs to do its own locking.
- */
- CAM_SIM_UNLOCK(xpt_path_sim(cdm->ccb_h.path));
switch(position_type & CAM_DEV_POS_TYPEMASK) {
case CAM_DEV_POS_EDT:
xptedtmatch(cdm);
@@ -2814,7 +2794,6 @@ xpt_action_default(union ccb *start_ccb)
cdm->status = CAM_DEV_MATCH_ERROR;
break;
}
- CAM_SIM_LOCK(xpt_path_sim(cdm->ccb_h.path));
if (cdm->status == CAM_DEV_MATCH_ERROR)
start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
@@ -2869,6 +2848,8 @@ xpt_action_default(union ccb *start_ccb)
break;
}
cur_entry->event_enable = csa->event_enable;
+ cur_entry->event_lock =
+ mtx_owned(path->bus->sim->mtx) ? 1 : 0;
cur_entry->callback_arg = csa->callback_arg;
cur_entry->callback = csa->callback;
SLIST_INSERT_HEAD(async_head, cur_entry, links);
@@ -2903,6 +2884,7 @@ xpt_action_default(union ccb *start_ccb)
}
}
+ mtx_lock(&dev->sim->devq->send_mtx);
if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
@@ -2955,6 +2937,7 @@ xpt_action_default(union ccb *start_ccb)
start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
}
}
+ mtx_unlock(&dev->sim->devq->send_mtx);
if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
@@ -2964,7 +2947,6 @@ xpt_action_default(union ccb *start_ccb)
}
case XPT_DEBUG: {
struct cam_path *oldpath;
- struct cam_sim *oldsim;
/* Check that all request bits are supported. */
if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
@@ -2974,15 +2956,9 @@ xpt_action_default(union ccb *start_ccb)
cam_dflags = CAM_DEBUG_NONE;
if (cam_dpath != NULL) {
- /* To release the old path we must hold proper lock. */
oldpath = cam_dpath;
cam_dpath = NULL;
- oldsim = xpt_path_sim(oldpath);
- CAM_SIM_UNLOCK(xpt_path_sim(start_ccb->ccb_h.path));
- CAM_SIM_LOCK(oldsim);
xpt_free_path(oldpath);
- CAM_SIM_UNLOCK(oldsim);
- CAM_SIM_LOCK(xpt_path_sim(start_ccb->ccb_h.path));
}
if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
if (xpt_create_path(&cam_dpath, NULL,
@@ -3029,39 +3005,41 @@ xpt_polled_action(union ccb *start_ccb)
struct cam_devq *devq;
struct cam_ed *dev;
-
timeout = start_ccb->ccb_h.timeout * 10;
sim = start_ccb->ccb_h.path->bus->sim;
devq = sim->devq;
dev = start_ccb->ccb_h.path->device;
- mtx_assert(sim->mtx, MA_OWNED);
-
- /* Don't use ISR for this SIM while polling. */
- sim->flags |= CAM_SIM_POLLED;
+ mtx_unlock(&dev->device_mtx);
/*
* Steal an opening so that no other queued requests
* can get it before us while we simulate interrupts.
*/
+ mtx_lock(&devq->send_mtx);
dev->ccbq.devq_openings--;
dev->ccbq.dev_openings--;
-
- while(((devq != NULL && devq->send_openings <= 0) ||
- dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
+ while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
+ (--timeout > 0)) {
+ mtx_unlock(&devq->send_mtx);
DELAY(100);
+ CAM_SIM_LOCK(sim);
(*(sim->sim_poll))(sim);
- camisr_runqueue(sim);
+ CAM_SIM_UNLOCK(sim);
+ camisr_runqueue();
+ mtx_lock(&devq->send_mtx);
}
-
dev->ccbq.devq_openings++;
dev->ccbq.dev_openings++;
+ mtx_unlock(&devq->send_mtx);
if (timeout != 0) {
xpt_action(start_ccb);
while(--timeout > 0) {
+ CAM_SIM_LOCK(sim);
(*(sim->sim_poll))(sim);
- camisr_runqueue(sim);
+ CAM_SIM_UNLOCK(sim);
+ camisr_runqueue();
if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
!= CAM_REQ_INPROG)
break;
@@ -3080,8 +3058,7 @@ xpt_polled_action(union ccb *start_ccb)
start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
}
- /* We will use CAM ISR for this SIM again. */
- sim->flags &= ~CAM_SIM_POLLED;
+ mtx_lock(&dev->device_mtx);
}
/*
@@ -3089,38 +3066,14 @@ xpt_polled_action(union ccb *start_ccb)
* target device has space for more transactions.
*/
void
-xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
+xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
{
- struct cam_ed *device;
- int runq = 0;
- mtx_assert(perph->sim->mtx, MA_OWNED);
-
- CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
- device = perph->path->device;
- if (periph_is_queued(perph)) {
- /* Simply reorder based on new priority */
- CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
- (" change priority to %d\n", new_priority));
- if (new_priority < perph->pinfo.priority) {
- camq_change_priority(&device->drvq,
- perph->pinfo.index,
- new_priority);
- runq = 1;
- }
- } else {
- /* New entry on the queue */
- CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
- (" added periph to queue\n"));
- perph->pinfo.priority = new_priority;
- perph->pinfo.generation = ++device->drvq.generation;
- camq_insert(&device->drvq, &perph->pinfo);
- runq = 1;
- }
- if (runq != 0) {
- CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
- (" calling xpt_run_dev_allocq\n"));
- xpt_run_dev_allocq(device);
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
+ cam_periph_assert(periph, MA_OWNED);
+ if (new_priority < periph->scheduled_priority) {
+ periph->scheduled_priority = new_priority;
+ xpt_run_allocq(periph, 0);
}
}
@@ -3133,7 +3086,7 @@ xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
* started the queue, return 0 so the caller doesn't attempt
* to run the queue.
*/
-int
+static int
xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
u_int32_t new_priority)
{
@@ -3173,51 +3126,78 @@ xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
}
static void
-xpt_run_dev_allocq(struct cam_ed *device)
+xpt_run_allocq_task(void *context, int pending)
+{
+ struct cam_periph *periph = context;
+
+ cam_periph_lock(periph);
+ periph->flags &= ~CAM_PERIPH_RUN_TASK;
+ xpt_run_allocq(periph, 1);
+ cam_periph_unlock(periph);
+ cam_periph_release(periph);
+}
+
+static void
+xpt_run_allocq(struct cam_periph *periph, int sleep)
{
- struct camq *drvq;
+ struct cam_ed *device;
+ union ccb *ccb;
+ uint32_t prio;
- if (device->ccbq.devq_allocating)
+ cam_periph_assert(periph, MA_OWNED);
+ if (periph->periph_allocating)
return;
- device->ccbq.devq_allocating = 1;
- CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq(%p)\n", device));
- drvq = &device->drvq;
- while ((drvq->entries > 0) &&
- (device->ccbq.devq_openings > 0 ||
- CAMQ_GET_PRIO(drvq) <= CAM_PRIORITY_OOB) &&
- (device->ccbq.queue.qfrozen_cnt == 0)) {
- union ccb *work_ccb;
- struct cam_periph *drv;
-
- KASSERT(drvq->entries > 0, ("xpt_run_dev_allocq: "
- "Device on queue without any work to do"));
- if ((work_ccb = xpt_get_ccb(device)) != NULL) {
- drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
- xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
- drv->pinfo.priority);
+ periph->periph_allocating = 1;
+ CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
+ device = periph->path->device;
+ ccb = NULL;
+restart:
+ while ((prio = min(periph->scheduled_priority,
+ periph->immediate_priority)) != CAM_PRIORITY_NONE &&
+ (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
+ device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
+
+ if (ccb == NULL &&
+ (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
+ if (sleep) {
+ ccb = xpt_get_ccb(periph);
+ goto restart;
+ }
+ if (periph->flags & CAM_PERIPH_RUN_TASK) {
+ break;
+ }
+ cam_periph_acquire(periph);
+ periph->flags |= CAM_PERIPH_RUN_TASK;
+ taskqueue_enqueue(xsoftc.xpt_taskq,
+ &periph->periph_run_task);
+ break;
+ }
+ xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
+ if (prio == periph->immediate_priority) {
+ periph->immediate_priority = CAM_PRIORITY_NONE;
CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
- ("calling periph start\n"));
- drv->periph_start(drv, work_ccb);
+ ("waking cam_periph_getccb()\n"));
+ SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
+ periph_links.sle);
+ wakeup(&periph->ccb_list);
} else {
- /*
- * Malloc failure in alloc_ccb
- */
- /*
- * XXX add us to a list to be run from free_ccb
- * if we don't have any ccbs active on this
- * device queue otherwise we may never get run
- * again.
- */
- break;
+ periph->scheduled_priority = CAM_PRIORITY_NONE;
+ CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
+ ("calling periph_start()\n"));
+ periph->periph_start(periph, ccb);
}
+ ccb = NULL;
}
- device->ccbq.devq_allocating = 0;
+ if (ccb != NULL)
+ xpt_release_ccb(ccb);
+ periph->periph_allocating = 0;
}
static void
xpt_run_devq(struct cam_devq *devq)
{
char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
+ int lock;
CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
@@ -3225,14 +3205,12 @@ xpt_run_devq(struct cam_devq *devq)
while ((devq->send_queue.entries > 0)
&& (devq->send_openings > 0)
&& (devq->send_queue.qfrozen_cnt <= 1)) {
- struct cam_ed_qinfo *qinfo;
struct cam_ed *device;
union ccb *work_ccb;
struct cam_sim *sim;
- qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
+ device = (struct cam_ed *)camq_remove(&devq->send_queue,
CAMQ_HEAD);
- device = qinfo->device;
CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
("running device %p\n", device));
@@ -3253,8 +3231,7 @@ xpt_run_devq(struct cam_devq *devq)
* available.
*/
xpt_freeze_devq(work_ccb->ccb_h.path, 1);
- STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
- work_ccb->ccb_h.path->device,
+ STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
highpowerq_entry);
mtx_unlock(&xsoftc.xpt_lock);
@@ -3270,11 +3247,10 @@ xpt_run_devq(struct cam_devq *devq)
}
cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
cam_ccbq_send_ccb(&device->ccbq, work_ccb);
-
devq->send_openings--;
devq->send_active++;
-
xpt_schedule_devq(devq, device);
+ mtx_unlock(&devq->send_mtx);
if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
/*
@@ -3318,13 +3294,18 @@ xpt_run_devq(struct cam_devq *devq)
}
/*
- * Device queues can be shared among multiple sim instances
- * that reside on different busses. Use the SIM in the queue
- * CCB's path, rather than the one in the bus that was passed
- * into this function.
+ * Device queues can be shared among multiple SIM instances
+ * that reside on different busses. Use the SIM from the
+ * queued device, rather than the one from the calling bus.
*/
- sim = work_ccb->ccb_h.path->bus->sim;
+ sim = device->sim;
+ lock = (mtx_owned(sim->mtx) == 0);
+ if (lock)
+ CAM_SIM_LOCK(sim);
(*(sim->sim_action))(sim, work_ccb);
+ if (lock)
+ CAM_SIM_UNLOCK(sim);
+ mtx_lock(&devq->send_mtx);
}
devq->send_queue.qfrozen_cnt--;
}
@@ -3400,26 +3381,9 @@ xpt_create_path_unlocked(struct cam_path **new_path_ptr,
struct cam_periph *periph, path_id_t path_id,
target_id_t target_id, lun_id_t lun_id)
{
- struct cam_path *path;
- struct cam_eb *bus = NULL;
- cam_status status;
-
- path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_WAITOK);
- bus = xpt_find_bus(path_id);
- if (bus != NULL)
- CAM_SIM_LOCK(bus->sim);
- status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
- if (bus != NULL) {
- CAM_SIM_UNLOCK(bus->sim);
- xpt_release_bus(bus);
- }
- if (status != CAM_REQ_CMP) {
- free(path, M_CAMPATH);
- path = NULL;
- }
- *new_path_ptr = path;
- return (status);
+ return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
+ lun_id));
}
cam_status
@@ -3443,6 +3407,8 @@ xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
if (bus == NULL) {
status = CAM_PATH_INVALID;
} else {
+ xpt_lock_buses();
+ mtx_lock(&bus->eb_mtx);
target = xpt_find_target(bus, target_id);
if (target == NULL) {
/* Create one */
@@ -3455,6 +3421,7 @@ xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
target = new_target;
}
}
+ xpt_unlock_buses();
if (target != NULL) {
device = xpt_find_device(target, lun_id);
if (device == NULL) {
@@ -3472,6 +3439,7 @@ xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
}
}
}
+ mtx_unlock(&bus->eb_mtx);
}
/*
@@ -3494,6 +3462,32 @@ xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
return (status);
}
+cam_status
+xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
+{
+ struct cam_path *new_path;
+
+ new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
+ if (new_path == NULL)
+ return(CAM_RESRC_UNAVAIL);
+ xpt_copy_path(new_path, path);
+ *new_path_ptr = new_path;
+ return (CAM_REQ_CMP);
+}
+
+void
+xpt_copy_path(struct cam_path *new_path, struct cam_path *path)
+{
+
+ *new_path = *path;
+ if (path->bus != NULL)
+ xpt_acquire_bus(path->bus);
+ if (path->target != NULL)
+ xpt_acquire_target(path->target);
+ if (path->device != NULL)
+ xpt_acquire_device(path->device);
+}
+
void
xpt_release_path(struct cam_path *path)
{
@@ -3688,11 +3682,6 @@ xpt_path_string(struct cam_path *path, char *str, size_t str_len)
{
struct sbuf sb;
-#ifdef INVARIANTS
- if (path != NULL && path->bus != NULL)
- mtx_assert(path->bus->sim->mtx, MA_OWNED);
-#endif
-
sbuf_new(&sb, str, str_len, 0);
if (path == NULL)
@@ -3760,7 +3749,6 @@ xpt_path_sim(struct cam_path *path)
struct cam_periph*
xpt_path_periph(struct cam_path *path)
{
- mtx_assert(path->bus->sim->mtx, MA_OWNED);
return (path->periph);
}
@@ -3813,28 +3801,18 @@ xpt_path_legacy_ata_id(struct cam_path *path)
void
xpt_release_ccb(union ccb *free_ccb)
{
- struct cam_path *path;
struct cam_ed *device;
- struct cam_eb *bus;
- struct cam_sim *sim;
+ struct cam_periph *periph;
CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
- path = free_ccb->ccb_h.path;
- device = path->device;
- bus = path->bus;
- sim = bus->sim;
-
- mtx_assert(sim->mtx, MA_OWNED);
+ xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
+ device = free_ccb->ccb_h.path->device;
+ periph = free_ccb->ccb_h.path->periph;
+ xpt_free_ccb(free_ccb);
+ periph->periph_allocated--;
cam_ccbq_release_opening(&device->ccbq);
- if (sim->ccb_count > sim->max_ccbs) {
- xpt_free_ccb(free_ccb);
- sim->ccb_count--;
- } else {
- SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
- xpt_links.sle);
- }
- xpt_run_dev_allocq(device);
+ xpt_run_allocq(periph, 0);
}
/* Functions accessed by SIM drivers */
@@ -3867,12 +3845,13 @@ xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
sim->bus_id = bus;
new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
- M_CAMXPT, M_NOWAIT);
+ M_CAMXPT, M_NOWAIT|M_ZERO);
if (new_bus == NULL) {
/* Couldn't satisfy request */
return (CAM_RESRC_UNAVAIL);
}
+ mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
TAILQ_INIT(&new_bus->et_entries);
cam_sim_hold(sim);
new_bus->sim = sim;
@@ -4078,80 +4057,138 @@ xpt_async_string(u_int32_t async_code)
return ("AC_UNKNOWN");
}
-void
-xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
+static int
+xpt_async_size(u_int32_t async_code)
{
- struct cam_eb *bus;
- struct cam_et *target, *next_target;
- struct cam_ed *device, *next_device;
- mtx_assert(path->bus->sim->mtx, MA_OWNED);
- CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
- ("xpt_async(%s)\n", xpt_async_string(async_code)));
+ switch (async_code) {
+ case AC_BUS_RESET: return (0);
+ case AC_UNSOL_RESEL: return (0);
+ case AC_SCSI_AEN: return (0);
+ case AC_SENT_BDR: return (0);
+ case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
+ case AC_PATH_DEREGISTERED: return (0);
+ case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
+ case AC_LOST_DEVICE: return (0);
+ case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
+ case AC_INQ_CHANGED: return (0);
+ case AC_GETDEV_CHANGED: return (0);
+ case AC_CONTRACT: return (sizeof(struct ac_contract));
+ case AC_ADVINFO_CHANGED: return (-1);
+ case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
+ }
+ return (0);
+}
+
+static int
+xpt_async_process_dev(struct cam_ed *device, void *arg)
+{
+ union ccb *ccb = arg;
+ struct cam_path *path = ccb->ccb_h.path;
+ void *async_arg = ccb->casync.async_arg_ptr;
+ u_int32_t async_code = ccb->casync.async_code;
+ int relock;
+
+ if (path->device != device
+ && path->device->lun_id != CAM_LUN_WILDCARD
+ && device->lun_id != CAM_LUN_WILDCARD)
+ return (1);
/*
- * Most async events come from a CAM interrupt context. In
- * a few cases, the error recovery code at the peripheral layer,
- * which may run from our SWI or a process context, may signal
- * deferred events with a call to xpt_async.
+ * The async callback could free the device.
+ * If it is a broadcast async, it doesn't hold
+ * device reference, so take our own reference.
*/
+ xpt_acquire_device(device);
- bus = path->bus;
+ /*
+ * If async for specific device is to be delivered to
+ * the wildcard client, take the specific device lock.
+ * XXX: We may need a way for client to specify it.
+ */
+ if ((device->lun_id == CAM_LUN_WILDCARD &&
+ path->device->lun_id != CAM_LUN_WILDCARD) ||
+ (device->target->target_id == CAM_TARGET_WILDCARD &&
+ path->target->target_id != CAM_TARGET_WILDCARD) ||
+ (device->target->bus->path_id == CAM_BUS_WILDCARD &&
+ path->target->bus->path_id != CAM_BUS_WILDCARD)) {
+ mtx_unlock(&device->device_mtx);
+ xpt_path_lock(path);
+ relock = 1;
+ } else
+ relock = 0;
- if (async_code == AC_BUS_RESET) {
- /* Update our notion of when the last reset occurred */
- microtime(&bus->last_reset);
- }
+ (*(device->target->bus->xport->async))(async_code,
+ device->target->bus, device->target, device, async_arg);
+ xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
- for (target = TAILQ_FIRST(&bus->et_entries);
- target != NULL;
- target = next_target) {
+ if (relock) {
+ xpt_path_unlock(path);
+ mtx_lock(&device->device_mtx);
+ }
+ xpt_release_device(device);
+ return (1);
+}
- next_target = TAILQ_NEXT(target, links);
+static int
+xpt_async_process_tgt(struct cam_et *target, void *arg)
+{
+ union ccb *ccb = arg;
+ struct cam_path *path = ccb->ccb_h.path;
- if (path->target != target
- && path->target->target_id != CAM_TARGET_WILDCARD
- && target->target_id != CAM_TARGET_WILDCARD)
- continue;
+ if (path->target != target
+ && path->target->target_id != CAM_TARGET_WILDCARD
+ && target->target_id != CAM_TARGET_WILDCARD)
+ return (1);
- if (async_code == AC_SENT_BDR) {
- /* Update our notion of when the last reset occurred */
- microtime(&path->target->last_reset);
- }
+ if (ccb->casync.async_code == AC_SENT_BDR) {
+ /* Update our notion of when the last reset occurred */
+ microtime(&target->last_reset);
+ }
- for (device = TAILQ_FIRST(&target->ed_entries);
- device != NULL;
- device = next_device) {
+ return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
+}
- next_device = TAILQ_NEXT(device, links);
+static void
+xpt_async_process(struct cam_periph *periph, union ccb *ccb)
+{
+ struct cam_eb *bus;
+ struct cam_path *path;
+ void *async_arg;
+ u_int32_t async_code;
- if (path->device != device
- && path->device->lun_id != CAM_LUN_WILDCARD
- && device->lun_id != CAM_LUN_WILDCARD)
- continue;
- /*
- * The async callback could free the device.
- * If it is a broadcast async, it doesn't hold
- * device reference, so take our own reference.
- */
- xpt_acquire_device(device);
- (*(bus->xport->async))(async_code, bus,
- target, device,
- async_arg);
+ path = ccb->ccb_h.path;
+ async_code = ccb->casync.async_code;
+ async_arg = ccb->casync.async_arg_ptr;
+ CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
+ ("xpt_async(%s)\n", xpt_async_string(async_code)));
+ bus = path->bus;
- xpt_async_bcast(&device->asyncs, async_code,
- path, async_arg);
- xpt_release_device(device);
- }
+ if (async_code == AC_BUS_RESET) {
+ /* Update our notion of when the last reset occurred */
+ microtime(&bus->last_reset);
}
+ xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
+
/*
* If this wasn't a fully wildcarded async, tell all
* clients that want all async events.
*/
- if (bus != xpt_periph->path->bus)
- xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
- path, async_arg);
+ if (bus != xpt_periph->path->bus) {
+ xpt_path_lock(xpt_periph->path);
+ xpt_async_process_dev(xpt_periph->path->device, ccb);
+ xpt_path_unlock(xpt_periph->path);
+ }
+
+ if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
+ xpt_release_devq(path, 1, TRUE);
+ else
+ xpt_release_simq(path->bus->sim, TRUE);
+ if (ccb->casync.async_arg_size > 0)
+ free(async_arg, M_CAMXPT);
+ xpt_free_path(path);
+ xpt_free_ccb(ccb);
}
static void
@@ -4160,6 +4197,7 @@ xpt_async_bcast(struct async_list *async_head,
struct cam_path *path, void *async_arg)
{
struct async_node *cur_entry;
+ int lock;
cur_entry = SLIST_FIRST(async_head);
while (cur_entry != NULL) {
@@ -4170,72 +4208,149 @@ xpt_async_bcast(struct async_list *async_head,
* can delete its async callback entry.
*/
next_entry = SLIST_NEXT(cur_entry, links);
- if ((cur_entry->event_enable & async_code) != 0)
+ if ((cur_entry->event_enable & async_code) != 0) {
+ lock = cur_entry->event_lock;
+ if (lock)
+ CAM_SIM_LOCK(path->device->sim);
cur_entry->callback(cur_entry->callback_arg,
async_code, path,
async_arg);
+ if (lock)
+ CAM_SIM_UNLOCK(path->device->sim);
+ }
cur_entry = next_entry;
}
}
+void
+xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
+{
+ union ccb *ccb;
+ int size;
+
+ ccb = xpt_alloc_ccb_nowait();
+ if (ccb == NULL) {
+ xpt_print(path, "Can't allocate CCB to send %s\n",
+ xpt_async_string(async_code));
+ return;
+ }
+
+ if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
+ xpt_print(path, "Can't allocate path to send %s\n",
+ xpt_async_string(async_code));
+ xpt_free_ccb(ccb);
+ return;
+ }
+ ccb->ccb_h.path->periph = NULL;
+ ccb->ccb_h.func_code = XPT_ASYNC;
+ ccb->ccb_h.cbfcnp = xpt_async_process;
+ ccb->ccb_h.flags |= CAM_UNLOCKED;
+ ccb->casync.async_code = async_code;
+ ccb->casync.async_arg_size = 0;
+ size = xpt_async_size(async_code);
+ if (size > 0 && async_arg != NULL) {
+ ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
+ if (ccb->casync.async_arg_ptr == NULL) {
+ xpt_print(path, "Can't allocate argument to send %s\n",
+ xpt_async_string(async_code));
+ xpt_free_path(ccb->ccb_h.path);
+ xpt_free_ccb(ccb);
+ return;
+ }
+ memcpy(ccb->casync.async_arg_ptr, async_arg, size);
+ ccb->casync.async_arg_size = size;
+ } else if (size < 0)
+ ccb->casync.async_arg_size = size;
+ if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
+ xpt_freeze_devq(path, 1);
+ else
+ xpt_freeze_simq(path->bus->sim, 1);
+ xpt_done(ccb);
+}
+
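
Unlike the old implementation, the xpt_async() above never runs callbacks
inline: it snapshots async_arg into a malloc'd buffer (async_arg_ptr)
sized by xpt_async_size(), queues an XPT_ASYNC CCB, and lets a completion
thread deliver it without the SIM lock. A minimal userland model of the
copy-and-queue step, with invented names and no locking, assuming the
caller's argument may live on a short-lived stack:

#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

struct async_msg {
        STAILQ_ENTRY(async_msg) link;
        unsigned        code;           /* like casync.async_code */
        void            *arg;           /* like casync.async_arg_ptr */
        size_t          argsz;
};

static STAILQ_HEAD(, async_msg) asyncq = STAILQ_HEAD_INITIALIZER(asyncq);

static int
queue_async(unsigned code, const void *arg, size_t size)
{
        struct async_msg *m;

        if ((m = calloc(1, sizeof(*m))) == NULL)
                return (-1);
        m->code = code;
        if (arg != NULL && size > 0) {
                if ((m->arg = malloc(size)) == NULL) {
                        free(m);
                        return (-1);
                }
                memcpy(m->arg, arg, size);      /* snapshot before enqueue */
                m->argsz = size;
        }
        STAILQ_INSERT_TAIL(&asyncq, m, link);
        return (0);
}

int
main(void)
{
        int payload = 42;
        struct async_msg *m;

        queue_async(1, &payload, sizeof(payload));
        while ((m = STAILQ_FIRST(&asyncq)) != NULL) {   /* drain/deliver */
                STAILQ_REMOVE_HEAD(&asyncq, link);
                free(m->arg);
                free(m);
        }
        return (0);
}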
static void
xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
struct cam_et *target, struct cam_ed *device,
void *async_arg)
{
+
+ /*
+ * We only need to handle events for real devices.
+ */
+ if (target->target_id == CAM_TARGET_WILDCARD
+ || device->lun_id == CAM_LUN_WILDCARD)
+ return;
+
printf("%s called\n", __func__);
}
u_int32_t
xpt_freeze_devq(struct cam_path *path, u_int count)
{
- struct cam_ed *dev = path->device;
+ struct cam_ed *dev = path->device;
+ struct cam_devq *devq;
+ uint32_t freeze;
- mtx_assert(path->bus->sim->mtx, MA_OWNED);
+ devq = dev->sim->devq;
+ mtx_lock(&devq->send_mtx);
CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq() %u->%u\n",
dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
- dev->ccbq.queue.qfrozen_cnt += count;
+ freeze = (dev->ccbq.queue.qfrozen_cnt += count);
/* Remove frozen device from sendq. */
- if (device_is_queued(dev)) {
- camq_remove(&dev->sim->devq->send_queue,
- dev->devq_entry.pinfo.index);
- }
- return (dev->ccbq.queue.qfrozen_cnt);
+ if (device_is_queued(dev))
+ camq_remove(&devq->send_queue, dev->devq_entry.index);
+ mtx_unlock(&devq->send_mtx);
+ return (freeze);
}
u_int32_t
xpt_freeze_simq(struct cam_sim *sim, u_int count)
{
+ struct cam_devq *devq;
+ uint32_t freeze;
- mtx_assert(sim->mtx, MA_OWNED);
- sim->devq->send_queue.qfrozen_cnt += count;
- return (sim->devq->send_queue.qfrozen_cnt);
+ devq = sim->devq;
+ mtx_lock(&devq->send_mtx);
+ freeze = (devq->send_queue.qfrozen_cnt += count);
+ mtx_unlock(&devq->send_mtx);
+ return (freeze);
}
static void
xpt_release_devq_timeout(void *arg)
{
- struct cam_ed *device;
+ struct cam_ed *dev;
+ struct cam_devq *devq;
- device = (struct cam_ed *)arg;
- CAM_DEBUG_DEV(device, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
- xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
+ dev = (struct cam_ed *)arg;
+ CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
+ devq = dev->sim->devq;
+ mtx_assert(&devq->send_mtx, MA_OWNED);
+ if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
+ xpt_run_devq(devq);
}
void
xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
{
+ struct cam_ed *dev;
+ struct cam_devq *devq;
- mtx_assert(path->bus->sim->mtx, MA_OWNED);
CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
count, run_queue));
- xpt_release_devq_device(path->device, count, run_queue);
+ dev = path->device;
+ devq = dev->sim->devq;
+ mtx_lock(&devq->send_mtx);
+ if (xpt_release_devq_device(dev, count, run_queue))
+ xpt_run_devq(dev->sim->devq);
+ mtx_unlock(&devq->send_mtx);
}
-void
+static int
xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
{
+ mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
@@ -4261,34 +4376,32 @@ xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
callout_stop(&dev->callout);
dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
}
- xpt_run_dev_allocq(dev);
- if (run_queue == 0)
- return;
/*
* Now that we are unfrozen schedule the
* device so any pending transactions are
* run.
*/
- if (xpt_schedule_devq(dev->sim->devq, dev))
- xpt_run_devq(dev->sim->devq);
- }
+ xpt_schedule_devq(dev->sim->devq, dev);
+ } else
+ run_queue = 0;
+ return (run_queue);
}
void
xpt_release_simq(struct cam_sim *sim, int run_queue)
{
- struct camq *sendq;
+ struct cam_devq *devq;
- mtx_assert(sim->mtx, MA_OWNED);
- sendq = &(sim->devq->send_queue);
- if (sendq->qfrozen_cnt <= 0) {
+ devq = sim->devq;
+ mtx_lock(&devq->send_mtx);
+ if (devq->send_queue.qfrozen_cnt <= 0) {
#ifdef INVARIANTS
printf("xpt_release_simq: requested 1 > present %u\n",
- sendq->qfrozen_cnt);
+ devq->send_queue.qfrozen_cnt);
#endif
} else
- sendq->qfrozen_cnt--;
- if (sendq->qfrozen_cnt == 0) {
+ devq->send_queue.qfrozen_cnt--;
+ if (devq->send_queue.qfrozen_cnt == 0) {
/*
* If there is a timeout scheduled to release this
* sim queue, remove it. The queue frozen count is
@@ -4305,6 +4418,7 @@ xpt_release_simq(struct cam_sim *sim, int run_queue)
xpt_run_devq(sim->devq);
}
}
+ mtx_unlock(&devq->send_mtx);
}
/*
@@ -4322,49 +4436,34 @@ xpt_release_simq_timeout(void *arg)
void
xpt_done(union ccb *done_ccb)
{
- struct cam_sim *sim;
- int first;
+ struct cam_doneq *queue;
+ int run, hash;
CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
- if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
- /*
- * Queue up the request for handling by our SWI handler
- * any of the "non-immediate" type of ccbs.
- */
- sim = done_ccb->ccb_h.path->bus->sim;
- TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
- sim_links.tqe);
- done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
- if ((sim->flags & (CAM_SIM_ON_DONEQ | CAM_SIM_POLLED |
- CAM_SIM_BATCH)) == 0) {
- mtx_lock(&cam_simq_lock);
- first = TAILQ_EMPTY(&cam_simq);
- TAILQ_INSERT_TAIL(&cam_simq, sim, links);
- mtx_unlock(&cam_simq_lock);
- sim->flags |= CAM_SIM_ON_DONEQ;
- if (first)
- swi_sched(cambio_ih, 0);
- }
- }
-}
-
-void
-xpt_batch_start(struct cam_sim *sim)
-{
+ if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
+ return;
- KASSERT((sim->flags & CAM_SIM_BATCH) == 0, ("Batch flag already set"));
- sim->flags |= CAM_SIM_BATCH;
+ hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
+ done_ccb->ccb_h.target_lun) % cam_num_doneqs;
+ queue = &cam_doneqs[hash];
+ mtx_lock(&queue->cam_doneq_mtx);
+ run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
+ STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
+ done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
+ mtx_unlock(&queue->cam_doneq_mtx);
+ if (run)
+ wakeup(&queue->cam_doneq);
}
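/*
 * Note on the queue selection above: the done queue is picked by a
 * plain sum-modulo hash of the B:T:L address,
 *
 *	hash = (path_id + target_id + target_lun) % cam_num_doneqs;
 *
 * so every CCB for a given device lands on the same completion thread
 * (preserving per-device completion order) while distinct devices
 * spread across threads.  The wakeup() is needed only on the
 * empty->non-empty transition, because xpt_done_td() advertises via
 * cam_doneq_sleep that it sleeps only while its queue is empty.
 */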
void
-xpt_batch_done(struct cam_sim *sim)
+xpt_done_direct(union ccb *done_ccb)
{
- KASSERT((sim->flags & CAM_SIM_BATCH) != 0, ("Batch flag was not set"));
- sim->flags &= ~CAM_SIM_BATCH;
- if (!TAILQ_EMPTY(&sim->sim_doneq) &&
- (sim->flags & CAM_SIM_ON_DONEQ) == 0)
- camisr_runqueue(sim);
+ CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done_direct\n"));
+ if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
+ return;
+
+ xpt_done_process(&done_ccb->ccb_h);
}
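/*
 * Sketch of the intended use, with a hypothetical driver: an HBA
 * interrupt handler that already runs without the SIM lock held can
 * complete CCBs inline via xpt_done_direct() and skip the handoff to a
 * completion thread.  example_hba_next_done() is invented for the
 * illustration; per the commit message, ahci(4) adopts this scheme
 * when its hardware setup permits.
 */
union ccb *example_hba_next_done(void *);	/* hypothetical */

static void
example_hba_intr(void *softc)
{
	union ccb *ccb;

	while ((ccb = example_hba_next_done(softc)) != NULL) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done_direct(ccb);	/* no extra context switch */
	}
}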
union ccb *
@@ -4397,32 +4496,64 @@ xpt_free_ccb(union ccb *free_ccb)
/*
* Get a CAM control block for the caller. Charge the structure to the device
- * referenced by the path. If this device has no 'credits', then the
- * device already has the maximum number of outstanding operations under way
- * and we return NULL. If we don't have sufficient resources to allocate more
- * ccbs, we also return NULL.
+ * referenced by the path. If we don't have sufficient resources to allocate
+ * more ccbs, we return NULL.
*/
static union ccb *
-xpt_get_ccb(struct cam_ed *device)
+xpt_get_ccb_nowait(struct cam_periph *periph)
{
union ccb *new_ccb;
- struct cam_sim *sim;
- sim = device->sim;
- if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
- new_ccb = xpt_alloc_ccb_nowait();
- if (new_ccb == NULL) {
- return (NULL);
- }
- SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
- xpt_links.sle);
- sim->ccb_count++;
- }
- cam_ccbq_take_opening(&device->ccbq);
- SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
+ new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_NOWAIT);
+ if (new_ccb == NULL)
+ return (NULL);
+ periph->periph_allocated++;
+ cam_ccbq_take_opening(&periph->path->device->ccbq);
+ return (new_ccb);
+}
+
+static union ccb *
+xpt_get_ccb(struct cam_periph *periph)
+{
+ union ccb *new_ccb;
+
+ cam_periph_unlock(periph);
+ new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_WAITOK);
+ cam_periph_lock(periph);
+ periph->periph_allocated++;
+ cam_ccbq_take_opening(&periph->path->device->ccbq);
return (new_ccb);
}
+union ccb *
+cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
+{
+ struct ccb_hdr *ccb_h;
+
+ CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
+ cam_periph_assert(periph, MA_OWNED);
+ while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
+ ccb_h->pinfo.priority != priority) {
+ if (priority < periph->immediate_priority) {
+ periph->immediate_priority = priority;
+ xpt_run_allocq(periph, 0);
+ } else
+ cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
+ "cgticb", 0);
+ }
+ SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
+ return ((union ccb *)ccb_h);
+}
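/*
 * Usage sketch for the new allocation path (the surrounding periph
 * code is assumed): a peripheral start routine now blocks in
 * cam_periph_getccb() under its own periph lock until the allocation
 * queue hands back a CCB of the requested priority.
 *
 *	cam_periph_lock(periph);
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	... fill in the CCB and xpt_action(ccb) ...
 *	cam_periph_unlock(periph);
 */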
+
+static void
+xpt_acquire_bus(struct cam_eb *bus)
+{
+
+ xpt_lock_buses();
+ bus->refcount++;
+ xpt_unlock_buses();
+}
+
static void
xpt_release_bus(struct cam_eb *bus)
{
@@ -4433,12 +4564,13 @@ xpt_release_bus(struct cam_eb *bus)
xpt_unlock_buses();
return;
}
- KASSERT(TAILQ_EMPTY(&bus->et_entries),
- ("refcount is zero, but target list is not empty"));
TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
xsoftc.bus_generation++;
xpt_unlock_buses();
+ KASSERT(TAILQ_EMPTY(&bus->et_entries),
+ ("destroying bus, but target list is not empty"));
cam_sim_release(bus->sim);
+ mtx_destroy(&bus->eb_mtx);
free(bus, M_CAMXPT);
}
@@ -4447,7 +4579,8 @@ xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
{
struct cam_et *cur_target, *target;
- mtx_assert(bus->sim->mtx, MA_OWNED);
+ mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
+ mtx_assert(&bus->eb_mtx, MA_OWNED);
target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
M_NOWAIT|M_ZERO);
if (target == NULL)
@@ -4459,14 +4592,13 @@ xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
target->refcount = 1;
target->generation = 0;
target->luns = NULL;
+ mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
timevalclear(&target->last_reset);
/*
* Hold a reference to our parent bus so it
* will not go away before we do.
*/
- xpt_lock_buses();
bus->refcount++;
- xpt_unlock_buses();
/* Insertion sort into our bus's target list */
cur_target = TAILQ_FIRST(&bus->et_entries);
@@ -4482,17 +4614,32 @@ xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
}
static void
+xpt_acquire_target(struct cam_et *target)
+{
+ struct cam_eb *bus = target->bus;
+
+ mtx_lock(&bus->eb_mtx);
+ target->refcount++;
+ mtx_unlock(&bus->eb_mtx);
+}
+
+static void
xpt_release_target(struct cam_et *target)
{
+ struct cam_eb *bus = target->bus;
- mtx_assert(target->bus->sim->mtx, MA_OWNED);
- if (--target->refcount > 0)
+ mtx_lock(&bus->eb_mtx);
+ if (--target->refcount > 0) {
+ mtx_unlock(&bus->eb_mtx);
return;
+ }
+ TAILQ_REMOVE(&bus->et_entries, target, links);
+ bus->generation++;
+ mtx_unlock(&bus->eb_mtx);
KASSERT(TAILQ_EMPTY(&target->ed_entries),
- ("refcount is zero, but device list is not empty"));
- TAILQ_REMOVE(&target->bus->et_entries, target, links);
- target->bus->generation++;
- xpt_release_bus(target->bus);
+ ("destroying target, but device list is not empty"));
+ xpt_release_bus(bus);
+ mtx_destroy(&target->luns_mtx);
if (target->luns)
free(target->luns, M_CAMXPT);
free(target, M_CAMXPT);
@@ -4510,10 +4657,19 @@ xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
device->mintags = 1;
device->maxtags = 1;
- bus->sim->max_ccbs += device->ccbq.devq_openings;
return (device);
}
+static void
+xpt_destroy_device(void *context, int pending)
+{
+ struct cam_ed *device = context;
+
+ mtx_lock(&device->device_mtx);
+ mtx_destroy(&device->device_mtx);
+ free(device, M_CAMDEV);
+}
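/*
 * Why the teardown is deferred: the last device reference may be
 * dropped from a context, such as a completion thread or a periph
 * callback, where it is not safe to destroy device_mtx or to sleep.
 * The matching pieces in this patch are the TASK_INIT() in
 * xpt_alloc_device() and the taskqueue_enqueue() at the end of
 * xpt_release_device():
 *
 *	TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device,
 *	    device);
 *	...
 *	taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
 *
 * Taking device_mtx here before destroying it ensures any concurrent
 * holder has dropped the lock before the structure is freed.
 */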
+
struct cam_ed *
xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
@@ -4521,10 +4677,12 @@ xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
struct cam_devq *devq;
cam_status status;
- mtx_assert(target->bus->sim->mtx, MA_OWNED);
+ mtx_assert(&bus->eb_mtx, MA_OWNED);
/* Make space for us in the device queue on our bus */
devq = bus->sim->devq;
+ mtx_lock(&devq->send_mtx);
status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
+ mtx_unlock(&devq->send_mtx);
if (status != CAM_REQ_CMP)
return (NULL);
@@ -4533,19 +4691,12 @@ xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
if (device == NULL)
return (NULL);
- cam_init_pinfo(&device->devq_entry.pinfo);
- device->devq_entry.device = device;
+ cam_init_pinfo(&device->devq_entry);
device->target = target;
device->lun_id = lun_id;
device->sim = bus->sim;
- /* Initialize our queues */
- if (camq_init(&device->drvq, 0) != 0) {
- free(device, M_CAMDEV);
- return (NULL);
- }
if (cam_ccbq_init(&device->ccbq,
bus->sim->max_dev_openings) != 0) {
- camq_fini(&device->drvq);
free(device, M_CAMDEV);
return (NULL);
}
@@ -4556,7 +4707,14 @@ xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
device->tag_delay_count = 0;
device->tag_saved_openings = 0;
device->refcount = 1;
- callout_init_mtx(&device->callout, bus->sim->mtx, 0);
+ mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
+ callout_init_mtx(&device->callout, &devq->send_mtx, 0);
+ TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
+ /*
+ * Hold a reference to our parent target so it
+ * will not go away before we do.
+ */
+ target->refcount++;
cur_device = TAILQ_FIRST(&target->ed_entries);
while (cur_device != NULL && cur_device->lun_id < lun_id)
@@ -4565,7 +4723,6 @@ xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
TAILQ_INSERT_BEFORE(cur_device, device, links);
else
TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
- target->refcount++;
target->generation++;
return (device);
}
@@ -4573,35 +4730,45 @@ xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
void
xpt_acquire_device(struct cam_ed *device)
{
+ struct cam_eb *bus = device->target->bus;
- mtx_assert(device->sim->mtx, MA_OWNED);
+ mtx_lock(&bus->eb_mtx);
device->refcount++;
+ mtx_unlock(&bus->eb_mtx);
}
void
xpt_release_device(struct cam_ed *device)
{
+ struct cam_eb *bus = device->target->bus;
struct cam_devq *devq;
- mtx_assert(device->sim->mtx, MA_OWNED);
- if (--device->refcount > 0)
+ mtx_lock(&bus->eb_mtx);
+ if (--device->refcount > 0) {
+ mtx_unlock(&bus->eb_mtx);
return;
+ }
+
+ TAILQ_REMOVE(&device->target->ed_entries, device, links);
+ device->target->generation++;
+ mtx_unlock(&bus->eb_mtx);
+
+ /* Release our slot in the devq */
+ devq = bus->sim->devq;
+ mtx_lock(&devq->send_mtx);
+ cam_devq_resize(devq, devq->send_queue.array_size - 1);
+ mtx_unlock(&devq->send_mtx);
KASSERT(SLIST_EMPTY(&device->periphs),
- ("refcount is zero, but periphs list is not empty"));
- if (device->devq_entry.pinfo.index != CAM_UNQUEUED_INDEX)
- panic("Removing device while still queued for ccbs");
+ ("destroying device, but periphs list is not empty"));
+ KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
+ ("destroying device while still queued for ccbs"));
if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
callout_stop(&device->callout);
- TAILQ_REMOVE(&device->target->ed_entries, device,links);
- device->target->generation++;
- device->target->bus->sim->max_ccbs -= device->ccbq.devq_openings;
- /* Release our slot in the devq */
- devq = device->target->bus->sim->devq;
- cam_devq_resize(devq, devq->send_queue.array_size - 1);
- camq_fini(&device->drvq);
+ xpt_release_target(device->target);
+
cam_ccbq_fini(&device->ccbq);
/*
* Free allocated memory. free(9) does nothing if the
@@ -4613,27 +4780,22 @@ xpt_release_device(struct cam_ed *device)
free(device->physpath, M_CAMXPT);
free(device->rcap_buf, M_CAMXPT);
free(device->serial_num, M_CAMXPT);
-
- xpt_release_target(device->target);
- free(device, M_CAMDEV);
+ taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
}
u_int32_t
xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
{
- int diff;
int result;
struct cam_ed *dev;
dev = path->device;
-
- diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
+ mtx_lock(&dev->sim->devq->send_mtx);
result = cam_ccbq_resize(&dev->ccbq, newopenings);
+ mtx_unlock(&dev->sim->devq->send_mtx);
if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
|| (dev->inq_flags & SID_CmdQue) != 0)
dev->tag_saved_openings = newopenings;
- /* Adjust the global limit */
- dev->sim->max_ccbs += diff;
return (result);
}
@@ -4660,7 +4822,7 @@ xpt_find_target(struct cam_eb *bus, target_id_t target_id)
{
struct cam_et *target;
- mtx_assert(bus->sim->mtx, MA_OWNED);
+ mtx_assert(&bus->eb_mtx, MA_OWNED);
for (target = TAILQ_FIRST(&bus->et_entries);
target != NULL;
target = TAILQ_NEXT(target, links)) {
@@ -4677,7 +4839,7 @@ xpt_find_device(struct cam_et *target, lun_id_t lun_id)
{
struct cam_ed *device;
- mtx_assert(target->bus->sim->mtx, MA_OWNED);
+ mtx_assert(&target->bus->eb_mtx, MA_OWNED);
for (device = TAILQ_FIRST(&target->ed_entries);
device != NULL;
device = TAILQ_NEXT(device, links)) {
@@ -4757,10 +4919,12 @@ xpt_config(void *arg)
/*
* Now that interrupts are enabled, go find our devices
*/
+ if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
+ printf("xpt_config: failed to create taskqueue thread.\n");
/* Setup debugging path */
if (cam_dflags != CAM_DEBUG_NONE) {
- if (xpt_create_path_unlocked(&cam_dpath, NULL,
+ if (xpt_create_path(&cam_dpath, NULL,
CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
CAM_DEBUG_LUN) != CAM_REQ_CMP) {
printf("xpt_config: xpt_create_path() failed for debug"
@@ -4777,7 +4941,8 @@ xpt_config(void *arg)
callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000,
xpt_boot_delay, NULL);
/* Fire up rescan thread. */
- if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
+ if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
+ "cam", "scanner")) {
printf("xpt_config: failed to create rescan thread.\n");
}
}
@@ -4862,13 +5027,11 @@ xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
int xptpath = 0;
if (path == NULL) {
- mtx_lock(&xsoftc.xpt_lock);
status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
- if (status != CAM_REQ_CMP) {
- mtx_unlock(&xsoftc.xpt_lock);
+ if (status != CAM_REQ_CMP)
return (status);
- }
+ xpt_path_lock(path);
xptpath = 1;
}
@@ -4881,8 +5044,8 @@ xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
status = csa.ccb_h.status;
if (xptpath) {
+ xpt_path_unlock(path);
xpt_free_path(path);
- mtx_unlock(&xsoftc.xpt_lock);
}
if ((status == CAM_REQ_CMP) &&
@@ -4967,122 +5130,172 @@ xpt_unlock_buses(void)
mtx_unlock(&xsoftc.xpt_topo_lock);
}
-static void
-camisr(void *dummy)
+struct mtx *
+xpt_path_mtx(struct cam_path *path)
{
- cam_simq_t queue;
- struct cam_sim *sim;
-
- mtx_lock(&cam_simq_lock);
- TAILQ_INIT(&queue);
- while (!TAILQ_EMPTY(&cam_simq)) {
- TAILQ_CONCAT(&queue, &cam_simq, links);
- mtx_unlock(&cam_simq_lock);
- while ((sim = TAILQ_FIRST(&queue)) != NULL) {
- TAILQ_REMOVE(&queue, sim, links);
- CAM_SIM_LOCK(sim);
- camisr_runqueue(sim);
- sim->flags &= ~CAM_SIM_ON_DONEQ;
- CAM_SIM_UNLOCK(sim);
- }
- mtx_lock(&cam_simq_lock);
- }
- mtx_unlock(&cam_simq_lock);
+ return (&path->device->device_mtx);
}
static void
-camisr_runqueue(struct cam_sim *sim)
+xpt_done_process(struct ccb_hdr *ccb_h)
{
- struct ccb_hdr *ccb_h;
+ struct cam_sim *sim;
+ struct cam_devq *devq;
+ struct mtx *mtx = NULL;
- while ((ccb_h = TAILQ_FIRST(&sim->sim_doneq)) != NULL) {
- int runq;
+ if (ccb_h->flags & CAM_HIGH_POWER) {
+ struct highpowerlist *hphead;
+ struct cam_ed *device;
- TAILQ_REMOVE(&sim->sim_doneq, ccb_h, sim_links.tqe);
- ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
+ mtx_lock(&xsoftc.xpt_lock);
+ hphead = &xsoftc.highpowerq;
- CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
- ("camisr\n"));
+ device = STAILQ_FIRST(hphead);
- runq = FALSE;
+ /*
+ * Increment the count since this command is done.
+ */
+ xsoftc.num_highpower++;
- if (ccb_h->flags & CAM_HIGH_POWER) {
- struct highpowerlist *hphead;
- struct cam_ed *device;
+ /*
+ * Any high powered commands queued up?
+ */
+ if (device != NULL) {
- mtx_lock(&xsoftc.xpt_lock);
- hphead = &xsoftc.highpowerq;
+ STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
+ mtx_unlock(&xsoftc.xpt_lock);
- device = STAILQ_FIRST(hphead);
+ mtx_lock(&device->sim->devq->send_mtx);
+ xpt_release_devq_device(device,
+ /*count*/1, /*runqueue*/TRUE);
+ mtx_unlock(&device->sim->devq->send_mtx);
+ } else
+ mtx_unlock(&xsoftc.xpt_lock);
+ }
- /*
- * Increment the count since this command is done.
- */
- xsoftc.num_highpower++;
+ sim = ccb_h->path->bus->sim;
- /*
- * Any high powered commands queued up?
- */
- if (device != NULL) {
+ if (ccb_h->status & CAM_RELEASE_SIMQ) {
+ xpt_release_simq(sim, /*run_queue*/FALSE);
+ ccb_h->status &= ~CAM_RELEASE_SIMQ;
+ }
- STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
- mtx_unlock(&xsoftc.xpt_lock);
+ if ((ccb_h->flags & CAM_DEV_QFRZDIS)
+ && (ccb_h->status & CAM_DEV_QFRZN)) {
+ xpt_release_devq(ccb_h->path, /*count*/1,
+ /*run_queue*/FALSE);
+ ccb_h->status &= ~CAM_DEV_QFRZN;
+ }
- xpt_release_devq_device(device,
- /*count*/1, /*runqueue*/TRUE);
- } else
- mtx_unlock(&xsoftc.xpt_lock);
+ devq = sim->devq;
+ if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
+ struct cam_ed *dev = ccb_h->path->device;
+
+ mtx_lock(&devq->send_mtx);
+ devq->send_active--;
+ devq->send_openings++;
+ cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
+
+ if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
+ && (dev->ccbq.dev_active == 0))) {
+ dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
+ xpt_release_devq_device(dev, /*count*/1,
+ /*run_queue*/FALSE);
}
- if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
- struct cam_ed *dev;
+ if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
+ && (ccb_h->status & CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
+ dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
+ xpt_release_devq_device(dev, /*count*/1,
+ /*run_queue*/FALSE);
+ }
- dev = ccb_h->path->device;
+ if (!device_is_queued(dev))
+ (void)xpt_schedule_devq(devq, dev);
+ mtx_unlock(&devq->send_mtx);
- cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
- sim->devq->send_active--;
- sim->devq->send_openings++;
- runq = TRUE;
-
- if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
- && (dev->ccbq.dev_active == 0))) {
- dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
- xpt_release_devq(ccb_h->path, /*count*/1,
- /*run_queue*/FALSE);
- }
-
- if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
- && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
- dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
- xpt_release_devq(ccb_h->path, /*count*/1,
- /*run_queue*/FALSE);
- }
+ if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
+ mtx = xpt_path_mtx(ccb_h->path);
+ mtx_lock(mtx);
if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
&& (--dev->tag_delay_count == 0))
xpt_start_tags(ccb_h->path);
- if (!device_is_queued(dev)) {
- (void)xpt_schedule_devq(sim->devq, dev);
- }
}
+ }
- if (ccb_h->status & CAM_RELEASE_SIMQ) {
- xpt_release_simq(sim, /*run_queue*/TRUE);
- ccb_h->status &= ~CAM_RELEASE_SIMQ;
- runq = FALSE;
+ if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
+ if (mtx == NULL) {
+ mtx = xpt_path_mtx(ccb_h->path);
+ mtx_lock(mtx);
+ }
+ } else {
+ if (mtx != NULL) {
+ mtx_unlock(mtx);
+ mtx = NULL;
}
+ }
- if ((ccb_h->flags & CAM_DEV_QFRZDIS)
- && (ccb_h->status & CAM_DEV_QFRZN)) {
- xpt_release_devq(ccb_h->path, /*count*/1,
- /*run_queue*/TRUE);
- ccb_h->status &= ~CAM_DEV_QFRZN;
- } else if (runq) {
- xpt_run_devq(sim->devq);
+ /* Call the peripheral driver's callback */
+ (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
+ if (mtx != NULL)
+ mtx_unlock(mtx);
+
+ mtx_lock(&devq->send_mtx);
+ xpt_run_devq(devq);
+ mtx_unlock(&devq->send_mtx);
+}
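/*
 * The callback locking contract implemented above, in short: the
 * peripheral callback runs with the path (device) mutex held unless
 * the CCB was submitted with CAM_UNLOCKED set, in which case any mutex
 * taken for the tag-delay accounting is dropped first.  A hypothetical
 * unlocked submission from a periph is just:
 *
 *	ccb->ccb_h.flags |= CAM_UNLOCKED;
 *	xpt_action(ccb);
 *
 * after which the completion callback must do its own locking.
 */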
+
+void
+xpt_done_td(void *arg)
+{
+ struct cam_doneq *queue = arg;
+ struct ccb_hdr *ccb_h;
+ STAILQ_HEAD(, ccb_hdr) doneq;
+
+ STAILQ_INIT(&doneq);
+ mtx_lock(&queue->cam_doneq_mtx);
+ while (1) {
+ while (STAILQ_EMPTY(&queue->cam_doneq)) {
+ queue->cam_doneq_sleep = 1;
+ msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
+ PRIBIO, "-", 0);
+ queue->cam_doneq_sleep = 0;
+ }
+ STAILQ_CONCAT(&doneq, &queue->cam_doneq);
+ mtx_unlock(&queue->cam_doneq_mtx);
+
+ THREAD_NO_SLEEPING();
+ while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
+ STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
+ ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
+ xpt_done_process(ccb_h);
}
+ THREAD_SLEEPING_OK();
- /* Call the peripheral driver's callback */
- (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
+ mtx_lock(&queue->cam_doneq_mtx);
+ }
+}
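/*
 * Self-contained userland analog of the xpt_done()/xpt_done_td()
 * handshake above, for illustration only (every name here is made up).
 * The consumer raises a "sleeping" flag only once it finds its queue
 * empty, so the producer signals solely on the empty->non-empty
 * transition and back-to-back completions stay signal-free.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int id;
};

static pthread_mutex_t q_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_cv = PTHREAD_COND_INITIALIZER;
static struct item *q_head;
static struct item **q_tail = &q_head;
static int q_sleeping;

static void
produce(struct item *it)
{
	int wake;

	it->next = NULL;
	pthread_mutex_lock(&q_mtx);
	wake = (q_sleeping && q_head == NULL);	/* empty->non-empty? */
	*q_tail = it;
	q_tail = &it->next;
	pthread_mutex_unlock(&q_mtx);
	if (wake)
		pthread_cond_signal(&q_cv);
}

static void *
consume(void *arg)
{
	struct item *it;

	(void)arg;
	pthread_mutex_lock(&q_mtx);
	for (;;) {
		while (q_head == NULL) {
			q_sleeping = 1;	/* advertise before sleeping */
			pthread_cond_wait(&q_cv, &q_mtx);
			q_sleeping = 0;
		}
		it = q_head;
		if ((q_head = it->next) == NULL)
			q_tail = &q_head;
		pthread_mutex_unlock(&q_mtx);
		printf("completed %d\n", it->id);	/* work outside the lock */
		free(it);
		pthread_mutex_lock(&q_mtx);
	}
}

int
main(void)
{
	pthread_t td;
	struct item *it;
	int i;

	pthread_create(&td, NULL, consume, NULL);
	for (i = 0; i < 4; i++) {
		it = malloc(sizeof(*it));
		it->id = i;
		produce(it);
	}
	pthread_join(td, NULL);	/* consumer loops forever; ^C to stop */
	return (0);
}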
+
+static void
+camisr_runqueue(void)
+{
+ struct ccb_hdr *ccb_h;
+ struct cam_doneq *queue;
+ int i;
+
+ /* Process global queues. */
+ for (i = 0; i < cam_num_doneqs; i++) {
+ queue = &cam_doneqs[i];
+ mtx_lock(&queue->cam_doneq_mtx);
+ while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
+ STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
+ mtx_unlock(&queue->cam_doneq_mtx);
+ ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
+ xpt_done_process(ccb_h);
+ mtx_lock(&queue->cam_doneq_mtx);
+ }
+ mtx_unlock(&queue->cam_doneq_mtx);
}
}