/*
 * NOTE(review): tail of rsm_intr_event() - the function header and the
 * declarations of seg, p, src_node, msg and category are above this
 * excerpt. Delivers a poll event to the segment(s) addressed by an
 * incoming IPC message: import segments are found via the import hash,
 * export segments via rsmexport_lookup().
 */
4026 RSM_KERNEL_AGENT | RSM_FUNC_ALL | RSM_INTR_CALLBACK);
4027
4028 DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsm_intr_event enter\n"));
4029
/* Node that sent the message; disambiguates equal keys from different peers. */
4030 src_node = msg->rsmipc_hdr.rsmipc_src;
4031
4032 if ((seg = msg->rsmipc_segment_cookie) != NULL) {
4033 /* This is for an import segment */
4034 uint_t hashval = rsmhash(msg->rsmipc_key);
4035
/* Reader lock suffices: the bucket chain is only traversed, never modified. */
4036 rw_enter(&rsm_import_segs.rsmhash_rw, RW_READER);
4037
4038 p = (rsmresource_t *)rsmhash_getbkt(&rsm_import_segs, hashval);
4039
/*
 * No break after a match: every import segment in this bucket that
 * matches (key, src_node) gets its event count bumped and its
 * pollers woken.
 */
4040 for (; p; p = p->rsmrc_next) {
4041 if ((p->rsmrc_key == msg->rsmipc_key) &&
4042 (p->rsmrc_node == src_node)) {
4043 seg = (rsmseg_t *)p;
4044 rsmseglock_acquire(seg);
4045
4046 atomic_add_32(&seg->s_pollevent, 1);
4047
4048 if (seg->s_pollflag & RSM_SEGMENT_POLL)
4049 pollwakeup(&seg->s_poll, POLLRDNORM);
4050
4051 rsmseglock_release(seg);
4052 }
4053 }
4054
4055 rw_exit(&rsm_import_segs.rsmhash_rw);
4056 } else {
4057 /* This is for an export segment */
4058 seg = rsmexport_lookup(msg->rsmipc_key);
4059 if (!seg) {
4060 DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
4061 "rsm_intr_event done: exp seg not found\n"));
4062 return;
4063 }
4064
/* The ASSERT documents that rsmexport_lookup() returned with the lock held. */
4065 ASSERT(rsmseglock_held(seg));
4066
4067 atomic_add_32(&seg->s_pollevent, 1);
4068
4069 /*
4070 * We must hold the segment lock here, or else the segment
4071 * can be freed while pollwakeup is using it. This implies
4072 * that we MUST NOT grab the segment lock during rsm_chpoll,
4073 * as outlined in the chpoll(2) man page.
4074 */
4075 if (seg->s_pollflag & RSM_SEGMENT_POLL)
4076 pollwakeup(&seg->s_poll, POLLRDNORM);
4077
4078 rsmseglock_release(seg);
4079 }
4080
4081 DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsm_intr_event done\n"));
4082 }
4083
4084 /*
4085 * The exporter did a republish and changed the ACL - this change is only
4086 * visible to new importers.
4087 */
5485 * release the reserved msgbuf since
5486 * the send failed
5487 */
5488 sendq_token->msgbuf_avail++;
5489 cv_broadcast(&sendq_token->sendq_cv);
5490 mutex_exit(&path->mutex);
5491 }
5492 } else
5493 e = adapter->rsmpi_ops->rsm_send(ipc_handle, &is,
5494 NULL);
5495
/*
 * No-reply message: the sendq token is released immediately; the
 * outcome of rsm_send is handled below.
 */
5496 no_reply_cnt++;
5497 rele_sendq_token(sendq_token);
5498 if (e != RSM_SUCCESS) {
5499 DBG_PRINTF((category, RSM_ERR,
5500 "rsm: rsmipc_send no reply send"
5501 " err = %d no reply count = %d\n",
5502 e, no_reply_cnt));
/* Fence/barrier errors are not expected on this send path. */
5503 ASSERT(e != RSMERR_QUEUE_FENCE_UP &&
5504 e != RSMERR_BAD_BARRIER_HNDL);
5505 atomic_add_64(&rsm_ipcsend_errcnt, 1);
/* Count the failure and retry from the 'again' label (above this excerpt). */
5506 goto again;
5507 } else {
5508 DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
5509 "rsmipc_send done\n"));
5510 return (e);
5511 }
5512
5513 }
5514
/*
 * Reply-only call (no request): fill in the reply header and push it
 * out with a single interrupt send; no flow control is applied.
 */
5515 if (req == NULL) {
5516 /* Send reply - No flow control is done for reply */
5517 /*
5518 * Set the version in the msg header for KA communication
5519 * versioning
5520 */
5521 reply->rsmipc_hdr.rsmipc_version = RSM_VERSION;
5522 reply->rsmipc_hdr.rsmipc_src = my_nodeid;
5523 /* incn number is not used for reply msgs currently */
5524 reply->rsmipc_hdr.rsmipc_incn = path->remote_incn;
5525
/* is_flags request DELIVER + SLEEP semantics from the RSMPI transport. */
5526 is.is_data = (void *)reply;
5527 is.is_size = sizeof (*reply);
5528 is.is_flags = RSM_INTR_SEND_DELIVER | RSM_INTR_SEND_SLEEP;
5529 is.is_wait = 0;
5530 e = adapter->rsmpi_ops->rsm_send(ipc_handle, &is, NULL);
5531 rele_sendq_token(sendq_token);
5532 if (e != RSM_SUCCESS) {
5533 DBG_PRINTF((category, RSM_ERR,
5534 "rsm: rsmipc_send reply send"
5535 " err = %d\n", e));
5536 atomic_add_64(&rsm_ipcsend_errcnt, 1);
/* Count the failure and retry from the 'again' label (above this excerpt). */
5537 goto again;
5538 } else {
5539 DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
5540 "rsmipc_send done\n"));
5541 return (e);
5542 }
5543 }
5544
/*
 * Request that needs a reply: allocate an IPC slot, mark it PENDING,
 * then loop sending the request and waiting on the slot's condvar
 * until the reply handler clears PENDING, we are signalled, or we
 * give up on an unrecoverable send error.
 */
5545 /* Reply needed */
5546 rslot = rsmipc_alloc(); /* allocate a new ipc slot */
5547
5548 mutex_enter(&rslot->rsmipc_lock);
5549
5550 rslot->rsmipc_data = (void *)reply;
/* PENDING is cleared when the reply arrives; the loop below waits for that. */
5551 RSMIPC_SET(rslot, RSMIPC_PENDING);
5552
5553 while (RSMIPC_GET(rslot, RSMIPC_PENDING)) {
5554 /*
5555 * Set the rsmipc_version number in the msghdr for KA
5556 * communication versioning
/*
 * NOTE(review): excerpt gap - this listing jumps from line 5556 to
 * 5622; the request header setup and the rsm_send of the request
 * are in the elided lines.
 */
5622 if (e != RSM_SUCCESS) {
5623 mutex_enter(&path->mutex);
5624 /*
5625 * release the reserved msgbuf since
5626 * the send failed
5627 */
5628 sendq_token->msgbuf_avail++;
5629 cv_broadcast(&sendq_token->sendq_cv);
5630 mutex_exit(&path->mutex);
5631 }
5632 } else
5633 e = adapter->rsmpi_ops->rsm_send(ipc_handle, &is,
5634 NULL);
5635
/* Unrecoverable send failure: release the slot and token, then retry. */
5636 if (e != RSM_SUCCESS) {
5637 DBG_PRINTF((category, RSM_ERR,
5638 "rsm: rsmipc_send rsmpi send err = %d\n", e));
5639 RSMIPC_CLEAR(rslot, RSMIPC_PENDING);
5640 rsmipc_free(rslot);
5641 rele_sendq_token(sendq_token);
5642 atomic_add_64(&rsm_ipcsend_errcnt, 1);
5643 goto again;
5644 }
5645
5646 /* wait for a reply signal, a SIGINT, or 5 sec. timeout */
5647 e = cv_reltimedwait_sig(&rslot->rsmipc_cv, &rslot->rsmipc_lock,
5648 drv_usectohz(5000000), TR_CLOCK_TICK);
5649 if (e < 0) {
5650 /* timed out - retry */
5651 e = RSMERR_TIMEOUT;
5652 } else if (e == 0) {
5653 /* signalled - return error */
5654 e = RSMERR_INTERRUPTED;
5655 break;
5656 } else {
/* Woken by cv_signal/broadcast: recheck PENDING at the loop top. */
5657 e = RSM_SUCCESS;
5658 }
5659 }
5660
/* Common exit: clear the slot state and return it to the pool. */
5661 RSMIPC_CLEAR(rslot, RSMIPC_PENDING);
5662 rsmipc_free(rslot);
/*
 * NOTE(review): tail of rsmipc_send_controlmsg() - the function header
 * and the declarations of path, msg, is, e, retry_cnt, min_retry_cnt,
 * adapter and ipc_handle are above this excerpt. Retries the control
 * message send while the path stays active, with a short backoff every
 * min_retry_cnt consecutive failures.
 */
5918 do {
5919 /* drop the path lock before doing the rsm_send */
5920 mutex_exit(&path->mutex);
5921
5922 is.is_data = (void *)&msg;
5923 is.is_size = sizeof (msg);
5924 is.is_flags = RSM_INTR_SEND_DELIVER | RSM_INTR_SEND_SLEEP;
5925 is.is_wait = 0;
5926
5927 e = adapter->rsmpi_ops->rsm_send(ipc_handle, &is, NULL);
5928
/* Fence/barrier errors are not expected on this send path. */
5929 ASSERT(e != RSMERR_QUEUE_FENCE_UP &&
5930 e != RSMERR_BAD_BARRIER_HNDL);
5931
5932 mutex_enter(&path->mutex);
5933
5934 if (e == RSM_SUCCESS) {
5935 break;
5936 }
5937 /* error counter for statistics */
5938 atomic_add_64(&rsm_ctrlmsg_errcnt, 1);
5939
5940 DBG_PRINTF((category, RSM_ERR,
5941 "rsmipc_send_controlmsg:rsm_send error=%d", e));
5942
5943 if (++retry_cnt == min_retry_cnt) { /* backoff before retry */
/* Sleep ~10ms on the sendq condvar (or until signalled) before retrying. */
5944 (void) cv_reltimedwait(&path->sendq_token.sendq_cv,
5945 &path->mutex, drv_usectohz(10000), TR_CLOCK_TICK);
5946 retry_cnt = 0;
5947 }
5948 } while (path->state == RSMKA_PATH_ACTIVE);
5949
5950 /* decrement the sendq,path refcnt that we incr before rsm_send */
5951 SENDQ_TOKEN_RELE(path);
5952 PATH_RELE_NOLOCK(path);
5953
5954 DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
5955 "rsmipc_send_controlmsg done=%d", e));
5956 return (e);
5957 }
5958
/*
 * NOTE(review): excerpt from the import-side connect path; the
 * function header and the surrounding error-mapping 'if' ladder for
 * 'e' are above this view - TODO confirm against the full source.
 */
6432 return (
6433 RSMERR_SEG_NOT_PUBLISHED_TO_NODE);
6434 else if ((e == RSMERR_RSM_ADDR_UNREACHABLE) ||
6435 (e == RSMERR_UNKNOWN_RSM_ADDR))
6436 return (RSMERR_REMOTE_NODE_UNREACHABLE);
6437 else
6438 return (e);
6439 }
6440
6441 }
/* Adopt the shared-import handle as this segment's import handle. */
6442 seg->s_handle.in = sharedp->rsmsi_handle;
6443
6444 }
6445
6446 seg->s_state = RSM_STATE_CONNECT;
6447
6448
6449 seg->s_flags &= ~RSM_IMPORT_DUMMY; /* clear dummy flag */
6450 if (bar_va) {
6451 /* increment generation number on barrier page */
6452 atomic_add_16(bar_va + seg->s_hdr.rsmrc_num, 1);
6453 /* return user off into barrier page where status will be */
6454 msg->off = (int)seg->s_hdr.rsmrc_num;
6455 msg->gnum = bar_va[msg->off]; /* gnum race */
6456 } else {
/* No barrier page mapped: report a zero offset/generation to the user. */
6457 msg->off = 0;
6458 msg->gnum = 0; /* gnum race */
6459 }
6460
/* Hand the segment size and resource number back to the caller. */
6461 msg->len = (int)sharedp->rsmsi_seglen;
6462 msg->rnum = seg->s_minor;
6463 rsmsharecv_signal(seg, RSMSI_STATE_CONNECTING, RSMSI_STATE_CONNECTED);
6464 rsmsharelock_release(seg);
6465 rsmseglock_release(seg);
6466
6467 /* Return back to user the segment size & perm in case it's needed */
6468
6469 #ifdef _MULTI_DATAMODEL
6470 if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
6471 rsm_ioctlmsg32_t msg32;
6472
/*
 * NOTE(review): tail of rsm_closeconnection() - header and locals are
 * above this excerpt. Drops this segment's reference on the shared
 * import structure, freeing it when the last importer disconnects.
 */
6668
6669 sharedp->rsmsi_refcnt--;
6670
/* Last importer gone: publish the cookie and tear down the shared state. */
6671 if (sharedp->rsmsi_refcnt == 0) {
6672 *cookie = (void *)sharedp->rsmsi_cookie;
6673 sharedp->rsmsi_state = RSMSI_STATE_DISCONNECTED;
6674 sharedp->rsmsi_handle = NULL;
6675 rsmsharelock_release(seg);
6676
6677 /* clean up the shared data structure */
6678 mutex_destroy(&sharedp->rsmsi_lock);
6679 cv_destroy(&sharedp->rsmsi_cv);
6680 kmem_free((void *)(sharedp), sizeof (rsm_import_share_t));
6681
6682 } else {
6683 rsmsharelock_release(seg);
6684 }
6685
6686 /* increment generation number on barrier page */
6687 if (bar_va) {
6688 atomic_add_16(bar_va + seg->s_hdr.rsmrc_num, 1);
6689 }
6690
6691 /*
6692 * The following needs to be done after any
6693 * rsmsharelock calls which use seg->s_share.
6694 */
6695 seg->s_share = NULL;
6696
6697 seg->s_state = RSM_STATE_DISCONNECT;
6698 /* signal anyone waiting in the CONN_QUIESCE state */
6699 cv_broadcast(&seg->s_cv);
6700
6701 DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
6702 "rsm_closeconnection done\n"));
6703
6704 return (RSM_SUCCESS);
6705 }
6706
6707 int
6708 rsm_disconnect(rsmseg_t *seg)
/*
 * NOTE(review): tail of the consume-event ioctl handler - the header
 * and the declarations of events, event_list, msg, seg, rnum, rc and i
 * are above this excerpt. For each resource number supplied by the
 * caller, consumes one pending poll event (if any) and reports
 * POLLRDNORM back in that entry's revent field.
 */
7267 DBG_DEFINE(category, RSM_KERNEL_AGENT | RSM_IOCTL);
7268
7269 event_list = events;
7270
7271 if ((rc = rsm_consumeevent_copyin(arg, &msg, &event_list, mode)) !=
7272 RSM_SUCCESS) {
7273 return (rc);
7274 }
7275
7276 for (i = 0; i < msg.numents; i++) {
7277 rnum = event_list[i].rnum;
7278 event_list[i].revent = 0;
7279 /* get the segment structure */
7280 seg = (rsmseg_t *)rsmresource_lookup(rnum, RSM_LOCK);
/* Unknown rnum entries are silently left with revent == 0. */
7281 if (seg) {
7282 DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
7283 "consumeevent_ioctl: rnum(%d) seg(%p)\n", rnum,
7284 seg));
/* Only decrement when an event is pending, so the count never underflows. */
7285 if (seg->s_pollevent) {
7286 /* consume the event */
7287 atomic_add_32(&seg->s_pollevent, -1);
7288 event_list[i].revent = POLLRDNORM;
7289 }
7290 rsmseglock_release(seg);
7291 }
7292 }
7293
7294 if ((rc = rsm_consumeevent_copyout(&msg, event_list, mode)) !=
7295 RSM_SUCCESS) {
7296 return (rc);
7297 }
7298
7299 return (RSM_SUCCESS);
7300 }
7301
7302 static int
7303 iovec_copyin(caddr_t user_vec, rsmka_iovec_t *iovec, int count, int mode)
7304 {
7305 int size;
7306 DBG_DEFINE(category, RSM_KERNEL_AGENT | RSM_IMPORT | RSM_IOCTL);
7307
|
/*
 * NOTE(review): tail of rsm_intr_event() (duplicate listing; this copy
 * uses the atomic_inc_32 idiom). The function header and the
 * declarations of seg, p, src_node, msg and category are above this
 * excerpt. Delivers a poll event to the segment(s) addressed by an
 * incoming IPC message.
 */
4026 RSM_KERNEL_AGENT | RSM_FUNC_ALL | RSM_INTR_CALLBACK);
4027
4028 DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsm_intr_event enter\n"));
4029
/* Node that sent the message; disambiguates equal keys from different peers. */
4030 src_node = msg->rsmipc_hdr.rsmipc_src;
4031
4032 if ((seg = msg->rsmipc_segment_cookie) != NULL) {
4033 /* This is for an import segment */
4034 uint_t hashval = rsmhash(msg->rsmipc_key);
4035
/* Reader lock suffices: the bucket chain is only traversed, never modified. */
4036 rw_enter(&rsm_import_segs.rsmhash_rw, RW_READER);
4037
4038 p = (rsmresource_t *)rsmhash_getbkt(&rsm_import_segs, hashval);
4039
/*
 * No break after a match: every import segment in this bucket that
 * matches (key, src_node) gets its event count bumped and its
 * pollers woken.
 */
4040 for (; p; p = p->rsmrc_next) {
4041 if ((p->rsmrc_key == msg->rsmipc_key) &&
4042 (p->rsmrc_node == src_node)) {
4043 seg = (rsmseg_t *)p;
4044 rsmseglock_acquire(seg);
4045
4046 atomic_inc_32(&seg->s_pollevent);
4047
4048 if (seg->s_pollflag & RSM_SEGMENT_POLL)
4049 pollwakeup(&seg->s_poll, POLLRDNORM);
4050
4051 rsmseglock_release(seg);
4052 }
4053 }
4054
4055 rw_exit(&rsm_import_segs.rsmhash_rw);
4056 } else {
4057 /* This is for an export segment */
4058 seg = rsmexport_lookup(msg->rsmipc_key);
4059 if (!seg) {
4060 DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
4061 "rsm_intr_event done: exp seg not found\n"));
4062 return;
4063 }
4064
/* The ASSERT documents that rsmexport_lookup() returned with the lock held. */
4065 ASSERT(rsmseglock_held(seg));
4066
4067 atomic_inc_32(&seg->s_pollevent);
4068
4069 /*
4070 * We must hold the segment lock here, or else the segment
4071 * can be freed while pollwakeup is using it. This implies
4072 * that we MUST NOT grab the segment lock during rsm_chpoll,
4073 * as outlined in the chpoll(2) man page.
4074 */
4075 if (seg->s_pollflag & RSM_SEGMENT_POLL)
4076 pollwakeup(&seg->s_poll, POLLRDNORM);
4077
4078 rsmseglock_release(seg);
4079 }
4080
4081 DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsm_intr_event done\n"));
4082 }
4083
4084 /*
4085 * The exporter did a republish and changed the ACL - this change is only
4086 * visible to new importers.
4087 */
5485 * release the reserved msgbuf since
5486 * the send failed
5487 */
5488 sendq_token->msgbuf_avail++;
5489 cv_broadcast(&sendq_token->sendq_cv);
5490 mutex_exit(&path->mutex);
5491 }
5492 } else
5493 e = adapter->rsmpi_ops->rsm_send(ipc_handle, &is,
5494 NULL);
5495
5496 no_reply_cnt++;
5497 rele_sendq_token(sendq_token);
5498 if (e != RSM_SUCCESS) {
5499 DBG_PRINTF((category, RSM_ERR,
5500 "rsm: rsmipc_send no reply send"
5501 " err = %d no reply count = %d\n",
5502 e, no_reply_cnt));
5503 ASSERT(e != RSMERR_QUEUE_FENCE_UP &&
5504 e != RSMERR_BAD_BARRIER_HNDL);
5505 atomic_inc_64(&rsm_ipcsend_errcnt);
5506 goto again;
5507 } else {
5508 DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
5509 "rsmipc_send done\n"));
5510 return (e);
5511 }
5512
5513 }
5514
5515 if (req == NULL) {
5516 /* Send reply - No flow control is done for reply */
5517 /*
5518 * Set the version in the msg header for KA communication
5519 * versioning
5520 */
5521 reply->rsmipc_hdr.rsmipc_version = RSM_VERSION;
5522 reply->rsmipc_hdr.rsmipc_src = my_nodeid;
5523 /* incn number is not used for reply msgs currently */
5524 reply->rsmipc_hdr.rsmipc_incn = path->remote_incn;
5525
5526 is.is_data = (void *)reply;
5527 is.is_size = sizeof (*reply);
5528 is.is_flags = RSM_INTR_SEND_DELIVER | RSM_INTR_SEND_SLEEP;
5529 is.is_wait = 0;
5530 e = adapter->rsmpi_ops->rsm_send(ipc_handle, &is, NULL);
5531 rele_sendq_token(sendq_token);
5532 if (e != RSM_SUCCESS) {
5533 DBG_PRINTF((category, RSM_ERR,
5534 "rsm: rsmipc_send reply send"
5535 " err = %d\n", e));
5536 atomic_inc_64(&rsm_ipcsend_errcnt);
5537 goto again;
5538 } else {
5539 DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
5540 "rsmipc_send done\n"));
5541 return (e);
5542 }
5543 }
5544
5545 /* Reply needed */
5546 rslot = rsmipc_alloc(); /* allocate a new ipc slot */
5547
5548 mutex_enter(&rslot->rsmipc_lock);
5549
5550 rslot->rsmipc_data = (void *)reply;
5551 RSMIPC_SET(rslot, RSMIPC_PENDING);
5552
5553 while (RSMIPC_GET(rslot, RSMIPC_PENDING)) {
5554 /*
5555 * Set the rsmipc_version number in the msghdr for KA
5556 * communication versioning
5622 if (e != RSM_SUCCESS) {
5623 mutex_enter(&path->mutex);
5624 /*
5625 * release the reserved msgbuf since
5626 * the send failed
5627 */
5628 sendq_token->msgbuf_avail++;
5629 cv_broadcast(&sendq_token->sendq_cv);
5630 mutex_exit(&path->mutex);
5631 }
5632 } else
5633 e = adapter->rsmpi_ops->rsm_send(ipc_handle, &is,
5634 NULL);
5635
5636 if (e != RSM_SUCCESS) {
5637 DBG_PRINTF((category, RSM_ERR,
5638 "rsm: rsmipc_send rsmpi send err = %d\n", e));
5639 RSMIPC_CLEAR(rslot, RSMIPC_PENDING);
5640 rsmipc_free(rslot);
5641 rele_sendq_token(sendq_token);
5642 atomic_inc_64(&rsm_ipcsend_errcnt);
5643 goto again;
5644 }
5645
5646 /* wait for a reply signal, a SIGINT, or 5 sec. timeout */
5647 e = cv_reltimedwait_sig(&rslot->rsmipc_cv, &rslot->rsmipc_lock,
5648 drv_usectohz(5000000), TR_CLOCK_TICK);
5649 if (e < 0) {
5650 /* timed out - retry */
5651 e = RSMERR_TIMEOUT;
5652 } else if (e == 0) {
5653 /* signalled - return error */
5654 e = RSMERR_INTERRUPTED;
5655 break;
5656 } else {
5657 e = RSM_SUCCESS;
5658 }
5659 }
5660
5661 RSMIPC_CLEAR(rslot, RSMIPC_PENDING);
5662 rsmipc_free(rslot);
/*
 * NOTE(review): tail of rsmipc_send_controlmsg() (duplicate listing;
 * this copy uses atomic_inc_64). The function header and the
 * declarations of path, msg, is, e, retry_cnt, min_retry_cnt, adapter
 * and ipc_handle are above this excerpt. Retries the control message
 * send while the path stays active, with a short backoff every
 * min_retry_cnt consecutive failures.
 */
5918 do {
5919 /* drop the path lock before doing the rsm_send */
5920 mutex_exit(&path->mutex);
5921
5922 is.is_data = (void *)&msg;
5923 is.is_size = sizeof (msg);
5924 is.is_flags = RSM_INTR_SEND_DELIVER | RSM_INTR_SEND_SLEEP;
5925 is.is_wait = 0;
5926
5927 e = adapter->rsmpi_ops->rsm_send(ipc_handle, &is, NULL);
5928
/* Fence/barrier errors are not expected on this send path. */
5929 ASSERT(e != RSMERR_QUEUE_FENCE_UP &&
5930 e != RSMERR_BAD_BARRIER_HNDL);
5931
5932 mutex_enter(&path->mutex);
5933
5934 if (e == RSM_SUCCESS) {
5935 break;
5936 }
5937 /* error counter for statistics */
5938 atomic_inc_64(&rsm_ctrlmsg_errcnt);
5939
5940 DBG_PRINTF((category, RSM_ERR,
5941 "rsmipc_send_controlmsg:rsm_send error=%d", e));
5942
5943 if (++retry_cnt == min_retry_cnt) { /* backoff before retry */
/* Sleep ~10ms on the sendq condvar (or until signalled) before retrying. */
5944 (void) cv_reltimedwait(&path->sendq_token.sendq_cv,
5945 &path->mutex, drv_usectohz(10000), TR_CLOCK_TICK);
5946 retry_cnt = 0;
5947 }
5948 } while (path->state == RSMKA_PATH_ACTIVE);
5949
5950 /* decrement the sendq,path refcnt that we incr before rsm_send */
5951 SENDQ_TOKEN_RELE(path);
5952 PATH_RELE_NOLOCK(path);
5953
5954 DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
5955 "rsmipc_send_controlmsg done=%d", e));
5956 return (e);
5957 }
5958
6432 return (
6433 RSMERR_SEG_NOT_PUBLISHED_TO_NODE);
6434 else if ((e == RSMERR_RSM_ADDR_UNREACHABLE) ||
6435 (e == RSMERR_UNKNOWN_RSM_ADDR))
6436 return (RSMERR_REMOTE_NODE_UNREACHABLE);
6437 else
6438 return (e);
6439 }
6440
6441 }
6442 seg->s_handle.in = sharedp->rsmsi_handle;
6443
6444 }
6445
6446 seg->s_state = RSM_STATE_CONNECT;
6447
6448
6449 seg->s_flags &= ~RSM_IMPORT_DUMMY; /* clear dummy flag */
6450 if (bar_va) {
6451 /* increment generation number on barrier page */
6452 atomic_inc_16(bar_va + seg->s_hdr.rsmrc_num);
6453 /* return user off into barrier page where status will be */
6454 msg->off = (int)seg->s_hdr.rsmrc_num;
6455 msg->gnum = bar_va[msg->off]; /* gnum race */
6456 } else {
6457 msg->off = 0;
6458 msg->gnum = 0; /* gnum race */
6459 }
6460
6461 msg->len = (int)sharedp->rsmsi_seglen;
6462 msg->rnum = seg->s_minor;
6463 rsmsharecv_signal(seg, RSMSI_STATE_CONNECTING, RSMSI_STATE_CONNECTED);
6464 rsmsharelock_release(seg);
6465 rsmseglock_release(seg);
6466
6467 /* Return back to user the segment size & perm in case it's needed */
6468
6469 #ifdef _MULTI_DATAMODEL
6470 if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
6471 rsm_ioctlmsg32_t msg32;
6472
6668
6669 sharedp->rsmsi_refcnt--;
6670
6671 if (sharedp->rsmsi_refcnt == 0) {
6672 *cookie = (void *)sharedp->rsmsi_cookie;
6673 sharedp->rsmsi_state = RSMSI_STATE_DISCONNECTED;
6674 sharedp->rsmsi_handle = NULL;
6675 rsmsharelock_release(seg);
6676
6677 /* clean up the shared data structure */
6678 mutex_destroy(&sharedp->rsmsi_lock);
6679 cv_destroy(&sharedp->rsmsi_cv);
6680 kmem_free((void *)(sharedp), sizeof (rsm_import_share_t));
6681
6682 } else {
6683 rsmsharelock_release(seg);
6684 }
6685
6686 /* increment generation number on barrier page */
6687 if (bar_va) {
6688 atomic_inc_16(bar_va + seg->s_hdr.rsmrc_num);
6689 }
6690
6691 /*
6692 * The following needs to be done after any
6693 * rsmsharelock calls which use seg->s_share.
6694 */
6695 seg->s_share = NULL;
6696
6697 seg->s_state = RSM_STATE_DISCONNECT;
6698 /* signal anyone waiting in the CONN_QUIESCE state */
6699 cv_broadcast(&seg->s_cv);
6700
6701 DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
6702 "rsm_closeconnection done\n"));
6703
6704 return (RSM_SUCCESS);
6705 }
6706
6707 int
6708 rsm_disconnect(rsmseg_t *seg)
/*
 * NOTE(review): tail of the consume-event ioctl handler (duplicate
 * listing; this copy uses atomic_dec_32). The header and the
 * declarations of events, event_list, msg, seg, rnum, rc and i are
 * above this excerpt. For each resource number supplied by the caller,
 * consumes one pending poll event (if any) and reports POLLRDNORM back
 * in that entry's revent field.
 */
7267 DBG_DEFINE(category, RSM_KERNEL_AGENT | RSM_IOCTL);
7268
7269 event_list = events;
7270
7271 if ((rc = rsm_consumeevent_copyin(arg, &msg, &event_list, mode)) !=
7272 RSM_SUCCESS) {
7273 return (rc);
7274 }
7275
7276 for (i = 0; i < msg.numents; i++) {
7277 rnum = event_list[i].rnum;
7278 event_list[i].revent = 0;
7279 /* get the segment structure */
7280 seg = (rsmseg_t *)rsmresource_lookup(rnum, RSM_LOCK);
/* Unknown rnum entries are silently left with revent == 0. */
7281 if (seg) {
7282 DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
7283 "consumeevent_ioctl: rnum(%d) seg(%p)\n", rnum,
7284 seg));
/* Only decrement when an event is pending, so the count never underflows. */
7285 if (seg->s_pollevent) {
7286 /* consume the event */
7287 atomic_dec_32(&seg->s_pollevent);
7288 event_list[i].revent = POLLRDNORM;
7289 }
7290 rsmseglock_release(seg);
7291 }
7292 }
7293
7294 if ((rc = rsm_consumeevent_copyout(&msg, event_list, mode)) !=
7295 RSM_SUCCESS) {
7296 return (rc);
7297 }
7298
7299 return (RSM_SUCCESS);
7300 }
7301
7302 static int
7303 iovec_copyin(caddr_t user_vec, rsmka_iovec_t *iovec, int count, int mode)
7304 {
7305 int size;
7306 DBG_DEFINE(category, RSM_KERNEL_AGENT | RSM_IMPORT | RSM_IOCTL);
7307
|