/*
 * Global vHCI bookkeeping: a count plus head/tail pointers.
 * NOTE(review): presumably a singly-maintained global list of registered
 * vHCIs — linkage and locking are maintained elsewhere in this file.
 */
99 int mdi_vhci_count;
100 mdi_vhci_t *mdi_vhci_head;
101 mdi_vhci_t *mdi_vhci_tail;
102
103 /*
104 * Client Hash Table size
105 */
106 static int mdi_client_table_size = CLIENT_HASH_TABLE_SIZE;
107
108 /*
109 * taskq interface definitions
110 */
111 #define MDI_TASKQ_N_THREADS 8
112 #define MDI_TASKQ_PRI minclsyspri
/*
 * MINALLOC/MAXALLOC expand to expressions of the mdi_taskq_n_threads
 * tunable (declared below), so they track any /etc/system override of
 * the thread count at the point of use rather than the compiled default.
 */
113 #define MDI_TASKQ_MINALLOC (4*mdi_taskq_n_threads)
114 #define MDI_TASKQ_MAXALLOC (500*mdi_taskq_n_threads)
115
116 taskq_t *mdi_taskq;
117 static uint_t mdi_taskq_n_threads = MDI_TASKQ_N_THREADS;
118
/* One second expressed in clock ticks: drv_usectohz(9F) converts usecs. */
119 #define TICKS_PER_SECOND (drv_usectohz(1000000))
120
121 /*
122 * The data should be "quiet" for this interval (in seconds) before the
123 * vhci cached data is flushed to the disk.
124 */
125 static int mdi_vhcache_flush_delay = 10;
126
127 /* number of seconds the vhcache flush daemon will sleep idle before exiting */
128 static int mdi_vhcache_flush_daemon_idle_time = 60;
130 /*
131 * MDI falls back to discovery of all paths when a bus_config_one fails.
132 * The following parameters can be used to tune this operation.
133 *
134 * mdi_path_discovery_boot
135 * Number of times path discovery will be attempted during early boot.
136 * Probably there is no reason to ever set this value to greater than one.
137 *
138 * mdi_path_discovery_postboot
139 * Number of times path discovery will be attempted after early boot.
3183 if (!(MDI_PI_IS_OFFLINE(pip) || MDI_PI_IS_INIT(pip) ||
3184 MDI_PI_IS_INITING(pip))) {
3185 /*
3186 * Node is busy
3187 */
3188 MDI_DEBUG(1, (MDI_WARN, ct->ct_dip,
3189 "!busy: pip %s %p", mdi_pi_spathname(pip), (void *)pip));
3190 MDI_PI_UNLOCK(pip);
3191 return (MDI_BUSY);
3192 }
3193
3194 while (MDI_PI(pip)->pi_ref_cnt != 0) {
3195 /*
3196 * Give a chance for pending I/Os to complete.
3197 */
3198 MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
3199 "!%d cmds still pending on path: %s %p",
3200 MDI_PI(pip)->pi_ref_cnt,
3201 mdi_pi_spathname(pip), (void *)pip));
3202 if (cv_reltimedwait(&MDI_PI(pip)->pi_ref_cv,
3203 &MDI_PI(pip)->pi_mutex, drv_usectohz(60 * 1000000),
3204 TR_CLOCK_TICK) == -1) {
3205 /*
3206 * The timeout expired without pi_ref_cv being signaled
3207 * to indicate that pi_ref_cnt dropped to zero.
3208 */
3209 MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
3210 "!Timeout reached on path %s %p without the cond",
3211 mdi_pi_spathname(pip), (void *)pip));
3212 MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
3213 "!%d cmds still pending on path %s %p",
3214 MDI_PI(pip)->pi_ref_cnt,
3215 mdi_pi_spathname(pip), (void *)pip));
3216 MDI_PI_UNLOCK(pip);
3217 return (MDI_BUSY);
3218 }
3219 }
3220 if (MDI_PI(pip)->pi_pm_held) {
3221 client_held = 1;
3222 }
3223 MDI_PI_UNLOCK(pip);
3849 {
3850 dev_info_t *vdip = NULL;
3851 mdi_vhci_t *vh = NULL;
3852 mdi_client_t *ct = NULL;
3853 int (*f)();
3854 int rv;
3855
3856 MDI_PI_LOCK(pip);
3857 ct = MDI_PI(pip)->pi_client;
3858 ASSERT(ct != NULL);
3859
3860 while (MDI_PI(pip)->pi_ref_cnt != 0) {
3861 /*
3862 * Give a chance for pending I/Os to complete.
3863 */
3864 MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
3865 "!%d cmds still pending on path %s %p",
3866 MDI_PI(pip)->pi_ref_cnt, mdi_pi_spathname(pip),
3867 (void *)pip));
3868 if (cv_reltimedwait(&MDI_PI(pip)->pi_ref_cv,
3869 &MDI_PI(pip)->pi_mutex, drv_usectohz(60 * 1000000),
3870 TR_CLOCK_TICK) == -1) {
3871 /*
3872 * The timeout expired without pi_ref_cv being signaled
3873 * to indicate that pi_ref_cnt dropped to zero.
3874 */
3875 MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
3876 "!Timeout reached on path %s %p without the cond",
3877 mdi_pi_spathname(pip), (void *)pip));
3878 MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
3879 "!%d cmds still pending on path %s %p",
3880 MDI_PI(pip)->pi_ref_cnt,
3881 mdi_pi_spathname(pip), (void *)pip));
3882 }
3883 }
3884 vh = ct->ct_vhci;
3885 vdip = vh->vh_dip;
3886
3887 /*
3888 * Notify the vHCI that has registered for this event
3889 */
|
/*
 * Global vHCI bookkeeping: a count plus head/tail pointers.
 * NOTE(review): presumably a singly-maintained global list of registered
 * vHCIs — linkage and locking are maintained elsewhere in this file.
 */
99 int mdi_vhci_count;
100 mdi_vhci_t *mdi_vhci_head;
101 mdi_vhci_t *mdi_vhci_tail;
102
103 /*
104 * Client Hash Table size
105 */
106 static int mdi_client_table_size = CLIENT_HASH_TABLE_SIZE;
107
108 /*
109 * taskq interface definitions
110 */
111 #define MDI_TASKQ_N_THREADS 8
112 #define MDI_TASKQ_PRI minclsyspri
/*
 * MINALLOC/MAXALLOC expand to expressions of the mdi_taskq_n_threads
 * tunable (declared below), so they track any /etc/system override of
 * the thread count at the point of use rather than the compiled default.
 */
113 #define MDI_TASKQ_MINALLOC (4*mdi_taskq_n_threads)
114 #define MDI_TASKQ_MAXALLOC (500*mdi_taskq_n_threads)
115
116 taskq_t *mdi_taskq;
117 static uint_t mdi_taskq_n_threads = MDI_TASKQ_N_THREADS;
118
/*
 * One second expressed in clock ticks; drv_sectohz(9F) is the
 * seconds-native replacement for the older drv_usectohz(1000000) form.
 */
119 #define TICKS_PER_SECOND drv_sectohz(1)
120
121 /*
122 * The data should be "quiet" for this interval (in seconds) before the
123 * vhci cached data is flushed to the disk.
124 */
125 static int mdi_vhcache_flush_delay = 10;
126
127 /* number of seconds the vhcache flush daemon will sleep idle before exiting */
128 static int mdi_vhcache_flush_daemon_idle_time = 60;
130 /*
131 * MDI falls back to discovery of all paths when a bus_config_one fails.
132 * The following parameters can be used to tune this operation.
133 *
134 * mdi_path_discovery_boot
135 * Number of times path discovery will be attempted during early boot.
136 * Probably there is no reason to ever set this value to greater than one.
137 *
138 * mdi_path_discovery_postboot
139 * Number of times path discovery will be attempted after early boot.
3183 if (!(MDI_PI_IS_OFFLINE(pip) || MDI_PI_IS_INIT(pip) ||
3184 MDI_PI_IS_INITING(pip))) {
3185 /*
3186 * Node is busy
3187 */
3188 MDI_DEBUG(1, (MDI_WARN, ct->ct_dip,
3189 "!busy: pip %s %p", mdi_pi_spathname(pip), (void *)pip));
3190 MDI_PI_UNLOCK(pip);
3191 return (MDI_BUSY);
3192 }
3193
3194 while (MDI_PI(pip)->pi_ref_cnt != 0) {
3195 /*
3196 * Give a chance for pending I/Os to complete.
3197 */
3198 MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
3199 "!%d cmds still pending on path: %s %p",
3200 MDI_PI(pip)->pi_ref_cnt,
3201 mdi_pi_spathname(pip), (void *)pip));
3202 if (cv_reltimedwait(&MDI_PI(pip)->pi_ref_cv,
3203 &MDI_PI(pip)->pi_mutex, drv_sectohz(60),
3204 TR_CLOCK_TICK) == -1) {
3205 /*
3206 * The timeout expired without pi_ref_cv being signaled
3207 * to indicate that pi_ref_cnt dropped to zero.
3208 */
3209 MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
3210 "!Timeout reached on path %s %p without the cond",
3211 mdi_pi_spathname(pip), (void *)pip));
3212 MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
3213 "!%d cmds still pending on path %s %p",
3214 MDI_PI(pip)->pi_ref_cnt,
3215 mdi_pi_spathname(pip), (void *)pip));
3216 MDI_PI_UNLOCK(pip);
3217 return (MDI_BUSY);
3218 }
3219 }
3220 if (MDI_PI(pip)->pi_pm_held) {
3221 client_held = 1;
3222 }
3223 MDI_PI_UNLOCK(pip);
3849 {
3850 dev_info_t *vdip = NULL;
3851 mdi_vhci_t *vh = NULL;
3852 mdi_client_t *ct = NULL;
3853 int (*f)();
3854 int rv;
3855
3856 MDI_PI_LOCK(pip);
3857 ct = MDI_PI(pip)->pi_client;
3858 ASSERT(ct != NULL);
3859
3860 while (MDI_PI(pip)->pi_ref_cnt != 0) {
3861 /*
3862 * Give a chance for pending I/Os to complete.
3863 */
3864 MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
3865 "!%d cmds still pending on path %s %p",
3866 MDI_PI(pip)->pi_ref_cnt, mdi_pi_spathname(pip),
3867 (void *)pip));
3868 if (cv_reltimedwait(&MDI_PI(pip)->pi_ref_cv,
3869 &MDI_PI(pip)->pi_mutex, drv_sectohz(60),
3870 TR_CLOCK_TICK) == -1) {
3871 /*
3872 * The timeout expired without pi_ref_cv being signaled
3873 * to indicate that pi_ref_cnt dropped to zero.
3874 */
3875 MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
3876 "!Timeout reached on path %s %p without the cond",
3877 mdi_pi_spathname(pip), (void *)pip));
3878 MDI_DEBUG(1, (MDI_NOTE, ct->ct_dip,
3879 "!%d cmds still pending on path %s %p",
3880 MDI_PI(pip)->pi_ref_cnt,
3881 mdi_pi_spathname(pip), (void *)pip));
3882 }
3883 }
3884 vh = ct->ct_vhci;
3885 vdip = vh->vh_dip;
3886
3887 /*
3888 * Notify the vHCI that has registered for this event
3889 */
|