4779 vhci shouldn't abuse ddi_get_time(9f)
Reviewed by: Robert Mustacchi <rm@joyent.com>
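The hunk shown below carries only the added Nexenta copyright; the substantive change behind this synopsis — replacing wall-clock ddi_get_time(9F) stamps with a monotonic time source — sits in hunks elided from this view. A minimal sketch of the idiom, using hypothetical field and variable names (svl_deadline, svl_deadline_hrt, timeout_secs are illustrative, not fields of the real driver):

	/*
	 * Hypothetical before: ddi_get_time() returns wall-clock seconds,
	 * which jump whenever the clock is stepped (date(1), NTP), so a
	 * deadline computed from it can fire early or never.
	 */
	vlun->svl_deadline = ddi_get_time() + timeout_secs;

	/*
	 * Hypothetical after: gethrtime(9F) is monotonic and unaffected
	 * by clock steps, which makes it safe for computing deadlines.
	 */
	vlun->svl_deadline_hrt = gethrtime() +
	    (hrtime_t)timeout_secs * NANOSEC;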
--- old/usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c
+++ new/usr/src/uts/common/io/scsi/adapters/scsi_vhci/scsi_vhci.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 +/*
25 + * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
26 + */
24 27
25 28 /*
26 29 * Multiplexed I/O SCSI vHCI implementation
27 30 */
28 31
29 32 #include <sys/conf.h>
30 33 #include <sys/file.h>
31 34 #include <sys/ddi.h>
32 35 #include <sys/sunddi.h>
33 36 #include <sys/scsi/scsi.h>
34 37 #include <sys/scsi/impl/scsi_reset_notify.h>
35 38 #include <sys/scsi/impl/services.h>
36 39 #include <sys/sunmdi.h>
37 40 #include <sys/mdi_impldefs.h>
38 41 #include <sys/scsi/adapters/scsi_vhci.h>
39 42 #include <sys/disp.h>
40 43 #include <sys/byteorder.h>
41 44
42 45 extern uintptr_t scsi_callback_id;
43 46 extern ddi_dma_attr_t scsi_alloc_attr;
44 47
45 48 #ifdef DEBUG
46 49 int vhci_debug = VHCI_DEBUG_DEFAULT_VAL;
47 50 #endif
48 51
49 52 /* retry for the vhci_do_prout command when a not ready is returned */
50 53 int vhci_prout_not_ready_retry = 180;
51 54
52 55 /*
53 56 * These values are defined to support the internal retry of
54 57 * SCSI packets for better sense code handling.
55 58 */
56 59 #define VHCI_CMD_CMPLT 0
57 60 #define VHCI_CMD_RETRY 1
58 61 #define VHCI_CMD_ERROR -1
59 62
60 63 #define PROPFLAGS (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)
61 64 #define VHCI_SCSI_PERR 0x47
62 65 #define VHCI_PGR_ILLEGALOP -2
63 66 #define VHCI_NUM_UPDATE_TASKQ 8
64 67 /* changed to 132 to accommodate HDS */
65 68
66 69 /*
67 70 * Version Macros
68 71 */
69 72 #define VHCI_NAME_VERSION "SCSI VHCI Driver"
70 73 char vhci_version_name[] = VHCI_NAME_VERSION;
71 74
72 75 int vhci_first_time = 0;
73 76 clock_t vhci_to_ticks = 0;
74 77 int vhci_init_wait_timeout = VHCI_INIT_WAIT_TIMEOUT;
75 78 kcondvar_t vhci_cv;
76 79 kmutex_t vhci_global_mutex;
77 80 void *vhci_softstate = NULL; /* for soft state */
78 81
79 82 /*
80 83 * Flag to delay the retry of the reserve command
81 84 */
82 85 int vhci_reserve_delay = 100000;
83 86 static int vhci_path_quiesce_timeout = 60;
84 87 static uchar_t zero_key[MHIOC_RESV_KEY_SIZE];
85 88
86 89 /* uscsi delay for a TRAN_BUSY */
87 90 static int vhci_uscsi_delay = 100000;
88 91 static int vhci_uscsi_retry_count = 180;
89 92 /* uscsi_restart_sense timeout id in case it needs to get canceled */
90 93 static timeout_id_t vhci_restart_timeid = 0;
91 94
92 95 static int vhci_bus_config_debug = 0;
93 96
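These retry and delay knobs are plain module globals; as with other illumos driver tunables they can be overridden at boot via /etc/system. A hypothetical fragment (values purely illustrative):

	set scsi_vhci:vhci_prout_not_ready_retry = 300
	set scsi_vhci:vhci_reserve_delay = 200000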
94 97 /*
95 98 * Bidirectional map of 'target-port' to port id <pid> for support of
96 99 * iostat(1M) '-Xx' and '-Yx' output.
97 100 */
98 101 static kmutex_t vhci_targetmap_mutex;
99 102 static uint_t vhci_targetmap_pid = 1;
100 103 static mod_hash_t *vhci_targetmap_bypid; /* <pid> -> 'target-port' */
101 104 static mod_hash_t *vhci_targetmap_byport; /* 'target-port' -> <pid> */
102 105
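A sketch of how such a bidirectional map is typically used: assign a stable <pid> the first time a 'target-port' string is seen, recording the association in both hashes. The helper below is hypothetical (the real code must also keep its own copy of the string key); it only illustrates the mod_hash plumbing:

	/*
	 * Hypothetical helper: map 'target-port' to a stable pid,
	 * allocating one on first sight. Not part of the real driver.
	 */
	static uint_t
	vhci_targetmap_pid_for_port(char *tport)
	{
		mod_hash_val_t hv;
		uint_t pid;

		mutex_enter(&vhci_targetmap_mutex);
		if (mod_hash_find(vhci_targetmap_byport,
		    (mod_hash_key_t)tport, &hv) == 0) {
			/* already mapped; reuse the existing pid */
			pid = (uint_t)(uintptr_t)hv;
		} else {
			pid = vhci_targetmap_pid++;
			(void) mod_hash_insert(vhci_targetmap_byport,
			    (mod_hash_key_t)tport,
			    (mod_hash_val_t)(uintptr_t)pid);
			(void) mod_hash_insert(vhci_targetmap_bypid,
			    (mod_hash_key_t)(uintptr_t)pid,
			    (mod_hash_val_t)tport);
		}
		mutex_exit(&vhci_targetmap_mutex);
		return (pid);
	}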
103 106 /*
104 107 * functions exported by scsi_vhci struct cb_ops
105 108 */
106 109 static int vhci_open(dev_t *, int, int, cred_t *);
107 110 static int vhci_close(dev_t, int, int, cred_t *);
108 111 static int vhci_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
109 112
110 113 /*
111 114 * functions exported by scsi_vhci struct dev_ops
112 115 */
113 116 static int vhci_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
114 117 static int vhci_attach(dev_info_t *, ddi_attach_cmd_t);
115 118 static int vhci_detach(dev_info_t *, ddi_detach_cmd_t);
116 119
117 120 /*
118 121 * functions exported by scsi_vhci scsi_hba_tran_t transport table
119 122 */
120 123 static int vhci_scsi_tgt_init(dev_info_t *, dev_info_t *,
121 124 scsi_hba_tran_t *, struct scsi_device *);
122 125 static void vhci_scsi_tgt_free(dev_info_t *, dev_info_t *, scsi_hba_tran_t *,
123 126 struct scsi_device *);
124 127 static int vhci_pgr_register_start(scsi_vhci_lun_t *, struct scsi_pkt *);
125 128 static int vhci_scsi_start(struct scsi_address *, struct scsi_pkt *);
126 129 static int vhci_scsi_abort(struct scsi_address *, struct scsi_pkt *);
127 130 static int vhci_scsi_reset(struct scsi_address *, int);
128 131 static int vhci_scsi_reset_target(struct scsi_address *, int level,
129 132 uint8_t select_path);
130 133 static int vhci_scsi_reset_bus(struct scsi_address *);
131 134 static int vhci_scsi_getcap(struct scsi_address *, char *, int);
132 135 static int vhci_scsi_setcap(struct scsi_address *, char *, int, int);
133 136 static int vhci_commoncap(struct scsi_address *, char *, int, int, int);
134 137 static int vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
135 138 mdi_pathinfo_t *pip);
136 139 static struct scsi_pkt *vhci_scsi_init_pkt(struct scsi_address *,
137 140 struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
138 141 static void vhci_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
139 142 static void vhci_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
140 143 static void vhci_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
141 144 static int vhci_scsi_reset_notify(struct scsi_address *, int, void (*)(caddr_t),
142 145 caddr_t);
143 146 static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int);
144 147 static int vhci_scsi_get_name(struct scsi_device *, char *, int);
145 148 static int vhci_scsi_bus_power(dev_info_t *, void *, pm_bus_power_op_t,
146 149 void *, void *);
147 150 static int vhci_scsi_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
148 151 void *, dev_info_t **);
149 152 static int vhci_scsi_bus_unconfig(dev_info_t *, uint_t, ddi_bus_config_op_t,
150 153 void *);
151 154 static struct scsi_failover_ops *vhci_dev_fo(dev_info_t *, struct scsi_device *,
152 155 void **, char **);
153 156
154 157 /*
155 158 * functions registered with the mpxio framework via mdi_vhci_ops_t
156 159 */
157 160 static int vhci_pathinfo_init(dev_info_t *, mdi_pathinfo_t *, int);
158 161 static int vhci_pathinfo_uninit(dev_info_t *, mdi_pathinfo_t *, int);
159 162 static int vhci_pathinfo_state_change(dev_info_t *, mdi_pathinfo_t *,
160 163 mdi_pathinfo_state_t, uint32_t, int);
161 164 static int vhci_pathinfo_online(dev_info_t *, mdi_pathinfo_t *, int);
162 165 static int vhci_pathinfo_offline(dev_info_t *, mdi_pathinfo_t *, int);
163 166 static int vhci_failover(dev_info_t *, dev_info_t *, int);
164 167 static void vhci_client_attached(dev_info_t *);
165 168 static int vhci_is_dev_supported(dev_info_t *, dev_info_t *, void *);
166 169
167 170 static int vhci_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
168 171 static int vhci_devctl(dev_t, int, intptr_t, int, cred_t *, int *);
169 172 static int vhci_ioc_get_phci_path(sv_iocdata_t *, caddr_t, int, caddr_t);
170 173 static int vhci_ioc_get_client_path(sv_iocdata_t *, caddr_t, int, caddr_t);
171 174 static int vhci_ioc_get_paddr(sv_iocdata_t *, caddr_t, int, caddr_t);
172 175 static int vhci_ioc_send_client_path(caddr_t, sv_iocdata_t *, int, caddr_t);
173 176 static void vhci_ioc_devi_to_path(dev_info_t *, caddr_t);
174 177 static int vhci_get_phci_path_list(dev_info_t *, sv_path_info_t *, uint_t);
175 178 static int vhci_get_client_path_list(dev_info_t *, sv_path_info_t *, uint_t);
176 179 static int vhci_get_iocdata(const void *, sv_iocdata_t *, int, caddr_t);
177 180 static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *,
178 181 int, caddr_t);
179 182 static int vhci_ioc_alloc_pathinfo(sv_path_info_t **, sv_path_info_t **,
180 183 uint_t, sv_iocdata_t *, int, caddr_t);
181 184 static void vhci_ioc_free_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t);
182 185 static int vhci_ioc_send_pathinfo(sv_path_info_t *, sv_path_info_t *, uint_t,
183 186 sv_iocdata_t *, int, caddr_t);
184 187 static int vhci_handle_ext_fo(struct scsi_pkt *, int);
185 188 static int vhci_efo_watch_cb(caddr_t, struct scsi_watch_result *);
186 189 static int vhci_quiesce_lun(struct scsi_vhci_lun *);
187 190 static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *);
188 191 static void vhci_dispatch_scsi_start(void *);
189 192 static void vhci_efo_done(void *);
190 193 static void vhci_initiate_auto_failback(void *);
191 194 static void vhci_update_pHCI_pkt(struct vhci_pkt *, struct scsi_pkt *);
192 195 static int vhci_update_pathinfo(struct scsi_device *, mdi_pathinfo_t *,
193 196 struct scsi_failover_ops *, scsi_vhci_lun_t *, struct scsi_vhci *);
194 197 static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *);
195 198 static int vhci_quiesce_paths(dev_info_t *, dev_info_t *,
196 199 scsi_vhci_lun_t *, char *, char *);
197 200
198 201 static char *vhci_devnm_to_guid(char *);
199 202 static int vhci_bind_transport(struct scsi_address *, struct vhci_pkt *,
200 203 int, int (*func)(caddr_t));
201 204 static void vhci_intr(struct scsi_pkt *);
202 205 static int vhci_do_prout(scsi_vhci_priv_t *);
203 206 static void vhci_run_cmd(void *);
204 207 static int vhci_do_prin(struct vhci_pkt **);
205 208 static struct scsi_pkt *vhci_create_retry_pkt(struct vhci_pkt *);
206 209 static struct vhci_pkt *vhci_sync_retry_pkt(struct vhci_pkt *);
207 210 static struct scsi_vhci_lun *vhci_lun_lookup(dev_info_t *);
208 211 static struct scsi_vhci_lun *vhci_lun_lookup_alloc(dev_info_t *, char *, int *);
209 212 static void vhci_lun_free(struct scsi_vhci_lun *dvlp, struct scsi_device *sd);
210 213 static int vhci_recovery_reset(scsi_vhci_lun_t *, struct scsi_address *,
211 214 uint8_t, uint8_t);
212 215 void vhci_update_pathstates(void *);
213 216
214 217 #ifdef DEBUG
215 218 static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int);
216 219 static void vhci_print_cdb(dev_info_t *dip, uint_t level,
217 220 char *title, uchar_t *cdb);
218 221 static void vhci_clean_print(dev_info_t *dev, uint_t level,
219 222 char *title, uchar_t *data, int len);
220 223 #endif
221 224 static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
222 225 static void vhci_uscsi_iodone(struct scsi_pkt *pkt);
223 226 static void vhci_invalidate_mpapi_lu(struct scsi_vhci *, scsi_vhci_lun_t *);
224 227
225 228 /*
226 229 * MP-API related functions
227 230 */
228 231 extern int vhci_mpapi_init(struct scsi_vhci *);
229 232 extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *);
230 233 extern int vhci_mpapi_ctl(dev_t, int, intptr_t, int, cred_t *, int *);
231 234 extern void vhci_update_mpapi_data(struct scsi_vhci *,
232 235 scsi_vhci_lun_t *, mdi_pathinfo_t *);
233 236 extern void* vhci_get_mpapi_item(struct scsi_vhci *, mpapi_list_header_t *,
234 237 uint8_t, void*);
235 238 extern void vhci_mpapi_set_path_state(dev_info_t *, mdi_pathinfo_t *, int);
236 239 extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *,
237 240 scsi_vhci_lun_t *);
238 241
239 242 #define VHCI_DMA_MAX_XFER_CAP INT_MAX
240 243
241 244 #define VHCI_MAX_PGR_RETRIES 3
242 245
243 246 /*
244 247 * Macros for the device-type mpxio options
245 248 */
246 249 #define LOAD_BALANCE_OPTIONS "load-balance-options"
247 250 #define LOGICAL_BLOCK_REGION_SIZE "region-size"
248 251 #define MPXIO_OPTIONS_LIST "device-type-mpxio-options-list"
249 252 #define DEVICE_TYPE_STR "device-type"
250 253 #define isdigit(ch) ((ch) >= '0' && (ch) <= '9')
251 254
252 255 static struct cb_ops vhci_cb_ops = {
253 256 vhci_open, /* open */
254 257 vhci_close, /* close */
255 258 nodev, /* strategy */
256 259 nodev, /* print */
257 260 nodev, /* dump */
258 261 nodev, /* read */
259 262 nodev, /* write */
260 263 vhci_ioctl, /* ioctl */
261 264 nodev, /* devmap */
262 265 nodev, /* mmap */
263 266 nodev, /* segmap */
264 267 nochpoll, /* chpoll */
265 268 ddi_prop_op, /* cb_prop_op */
266 269 0, /* streamtab */
267 270 D_NEW | D_MP, /* cb_flag */
268 271 CB_REV, /* rev */
269 272 nodev, /* aread */
270 273 nodev /* awrite */
271 274 };
272 275
273 276 static struct dev_ops vhci_ops = {
274 277 DEVO_REV,
275 278 0,
276 279 vhci_getinfo,
277 280 nulldev, /* identify */
278 281 nulldev, /* probe */
279 282 vhci_attach, /* attach and detach are mandatory */
280 283 vhci_detach,
281 284 nodev, /* reset */
282 285 &vhci_cb_ops, /* cb_ops */
283 286 NULL, /* bus_ops */
284 287 NULL, /* power */
285 288 ddi_quiesce_not_needed, /* quiesce */
286 289 };
287 290
288 291 extern struct mod_ops mod_driverops;
289 292
290 293 static struct modldrv modldrv = {
291 294 &mod_driverops,
292 295 vhci_version_name, /* module name */
293 296 &vhci_ops
294 297 };
295 298
296 299 static struct modlinkage modlinkage = {
297 300 MODREV_1,
298 301 &modldrv,
299 302 NULL
300 303 };
301 304
302 305 static mdi_vhci_ops_t vhci_opinfo = {
303 306 MDI_VHCI_OPS_REV,
304 307 vhci_pathinfo_init, /* Pathinfo node init callback */
305 308 vhci_pathinfo_uninit, /* Pathinfo uninit callback */
306 309 vhci_pathinfo_state_change, /* Pathinfo node state change */
307 310 vhci_failover, /* failover callback */
308 311 vhci_client_attached, /* client attached callback */
309 312 vhci_is_dev_supported /* is device supported by mdi */
310 313 };
311 314
312 315 /*
313 316 * The scsi_failover table defines an ordered set of 'fops' modules supported
314 317 * by scsi_vhci. Currently, initialize this table from the 'ddi-forceload'
315 318 * property specified in scsi_vhci.conf.
316 319 */
317 320 static struct scsi_failover {
318 321 ddi_modhandle_t sf_mod;
319 322 struct scsi_failover_ops *sf_sfo;
320 323 } *scsi_failover_table;
321 324 static uint_t scsi_nfailover;
322 325
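For reference, the 'ddi-forceload' property that seeds this table is a string array in scsi_vhci.conf; an illustrative, abridged fragment (module paths as conventionally shipped):

	ddi-forceload =
	    "misc/scsi_vhci/scsi_vhci_f_asym_sun",
	    "misc/scsi_vhci/scsi_vhci_f_sym",
	    "misc/scsi_vhci/scsi_vhci_f_tpgs";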
323 326 int
324 327 _init(void)
325 328 {
326 329 int rval;
327 330
328 331 /*
329 332 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
330 333 	 * before registering with the transport.
331 334 */
332 335 if ((rval = ddi_soft_state_init(&vhci_softstate,
333 336 sizeof (struct scsi_vhci), 1)) != 0) {
334 337 VHCI_DEBUG(1, (CE_NOTE, NULL,
335 338 "!_init:soft state init failed\n"));
336 339 return (rval);
337 340 }
338 341
339 342 if ((rval = scsi_hba_init(&modlinkage)) != 0) {
340 343 VHCI_DEBUG(1, (CE_NOTE, NULL,
341 344 "!_init: scsi hba init failed\n"));
342 345 ddi_soft_state_fini(&vhci_softstate);
343 346 return (rval);
344 347 }
345 348
346 349 mutex_init(&vhci_global_mutex, NULL, MUTEX_DRIVER, NULL);
347 350 cv_init(&vhci_cv, NULL, CV_DRIVER, NULL);
348 351
349 352 mutex_init(&vhci_targetmap_mutex, NULL, MUTEX_DRIVER, NULL);
350 353 vhci_targetmap_byport = mod_hash_create_strhash(
351 354 "vhci_targetmap_byport", 256, mod_hash_null_valdtor);
352 355 vhci_targetmap_bypid = mod_hash_create_idhash(
353 356 "vhci_targetmap_bypid", 256, mod_hash_null_valdtor);
354 357
355 358 if ((rval = mod_install(&modlinkage)) != 0) {
356 359 VHCI_DEBUG(1, (CE_NOTE, NULL, "!_init: mod_install failed\n"));
357 360 if (vhci_targetmap_bypid)
358 361 mod_hash_destroy_idhash(vhci_targetmap_bypid);
359 362 if (vhci_targetmap_byport)
360 363 mod_hash_destroy_strhash(vhci_targetmap_byport);
361 364 mutex_destroy(&vhci_targetmap_mutex);
362 365 cv_destroy(&vhci_cv);
363 366 mutex_destroy(&vhci_global_mutex);
364 367 scsi_hba_fini(&modlinkage);
365 368 ddi_soft_state_fini(&vhci_softstate);
366 369 }
367 370 return (rval);
368 371 }
369 372
370 373
371 374 /*
372 375 * the system is done with us as a driver, so clean up
373 376 */
374 377 int
375 378 _fini(void)
376 379 {
377 380 int rval;
378 381
379 382 /*
380 383 * don't start cleaning up until we know that the module remove
381 384 * has worked -- if this works, then we know that each instance
382 385 * has successfully been DDI_DETACHed
383 386 */
384 387 if ((rval = mod_remove(&modlinkage)) != 0) {
385 388 VHCI_DEBUG(4, (CE_NOTE, NULL, "!_fini: mod_remove failed\n"));
386 389 return (rval);
387 390 }
388 391
389 392 if (vhci_targetmap_bypid)
390 393 mod_hash_destroy_idhash(vhci_targetmap_bypid);
391 394 if (vhci_targetmap_byport)
392 395 mod_hash_destroy_strhash(vhci_targetmap_byport);
393 396 mutex_destroy(&vhci_targetmap_mutex);
394 397 cv_destroy(&vhci_cv);
395 398 mutex_destroy(&vhci_global_mutex);
396 399 scsi_hba_fini(&modlinkage);
397 400 ddi_soft_state_fini(&vhci_softstate);
398 401
399 402 return (rval);
400 403 }
401 404
402 405 int
403 406 _info(struct modinfo *modinfop)
404 407 {
405 408 return (mod_info(&modlinkage, modinfop));
406 409 }
407 410
408 411 /*
409 412 * Lookup scsi_failover by "short name" of failover module.
410 413 */
411 414 struct scsi_failover_ops *
412 415 vhci_failover_ops_by_name(char *name)
413 416 {
414 417 struct scsi_failover *sf;
415 418
416 419 for (sf = scsi_failover_table; sf->sf_mod; sf++) {
417 420 if (sf->sf_sfo == NULL)
418 421 continue;
419 422 if (strcmp(sf->sf_sfo->sfo_name, name) == 0)
420 423 return (sf->sf_sfo);
421 424 }
422 425 return (NULL);
423 426 }
424 427
425 428 /*
426 429 * Load all scsi_failover_ops 'fops' modules.
427 430 */
428 431 static void
429 432 vhci_failover_modopen(struct scsi_vhci *vhci)
430 433 {
431 434 char **module;
432 435 int i;
433 436 struct scsi_failover *sf;
434 437 char **dt;
435 438 int e;
436 439
437 440 if (scsi_failover_table)
438 441 return;
439 442
440 443 /* Get the list of modules from scsi_vhci.conf */
441 444 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY,
442 445 vhci->vhci_dip, DDI_PROP_DONTPASS, "ddi-forceload",
443 446 &module, &scsi_nfailover) != DDI_PROP_SUCCESS) {
444 447 cmn_err(CE_WARN, "scsi_vhci: "
445 448 "scsi_vhci.conf is missing 'ddi-forceload'");
446 449 return;
447 450 }
448 451 if (scsi_nfailover == 0) {
449 452 cmn_err(CE_WARN, "scsi_vhci: "
450 453 "scsi_vhci.conf has empty 'ddi-forceload'");
451 454 ddi_prop_free(module);
452 455 return;
453 456 }
454 457
455 458 /* allocate failover table based on number of modules */
456 459 scsi_failover_table = (struct scsi_failover *)
457 460 kmem_zalloc(sizeof (struct scsi_failover) * (scsi_nfailover + 1),
458 461 KM_SLEEP);
459 462
460 463 /* loop over modules specified in scsi_vhci.conf and open each module */
461 464 for (i = 0, sf = scsi_failover_table; i < scsi_nfailover; i++) {
462 465 if (module[i] == NULL)
463 466 continue;
464 467
465 468 sf->sf_mod = ddi_modopen(module[i], KRTLD_MODE_FIRST, &e);
466 469 if (sf->sf_mod == NULL) {
467 470 /*
468 471 * A module returns EEXIST if other software is
469 472 * supporting the intended function: for example
470 473 * the scsi_vhci_f_sum_emc module returns EEXIST
471 474 * from _init if EMC powerpath software is installed.
472 475 */
473 476 if (e != EEXIST)
474 477 cmn_err(CE_WARN, "scsi_vhci: unable to open "
475 478 "module '%s', error %d", module[i], e);
476 479 continue;
477 480 }
478 481 sf->sf_sfo = ddi_modsym(sf->sf_mod,
479 482 "scsi_vhci_failover_ops", &e);
480 483 if (sf->sf_sfo == NULL) {
481 484 cmn_err(CE_WARN, "scsi_vhci: "
482 485 "unable to import 'scsi_failover_ops' from '%s', "
483 486 "error %d", module[i], e);
484 487 (void) ddi_modclose(sf->sf_mod);
485 488 sf->sf_mod = NULL;
486 489 continue;
487 490 }
488 491
489 492 /* register vid/pid of devices supported with mpapi */
490 493 for (dt = sf->sf_sfo->sfo_devices; *dt; dt++)
491 494 vhci_mpapi_add_dev_prod(vhci, *dt);
492 495 sf++;
493 496 }
494 497
495 498 /* verify that at least the "well-known" modules were there */
496 499 if (vhci_failover_ops_by_name(SFO_NAME_SYM) == NULL)
497 500 cmn_err(CE_WARN, "scsi_vhci: well-known module \""
498 501 SFO_NAME_SYM "\" not defined in scsi_vhci.conf's "
499 502 "'ddi-forceload'");
500 503 if (vhci_failover_ops_by_name(SFO_NAME_TPGS) == NULL)
501 504 cmn_err(CE_WARN, "scsi_vhci: well-known module \""
502 505 SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's "
503 506 "'ddi-forceload'");
504 507
505 508 /* call sfo_init for modules that need it */
506 509 for (sf = scsi_failover_table; sf->sf_mod; sf++) {
507 510 if (sf->sf_sfo && sf->sf_sfo->sfo_init)
508 511 sf->sf_sfo->sfo_init();
509 512 }
510 513
511 514 ddi_prop_free(module);
512 515 }
513 516
514 517 /*
515 518 * unload all loaded scsi_failover_ops modules
516 519 */
517 520 static void
518 521 vhci_failover_modclose()
519 522 {
520 523 struct scsi_failover *sf;
521 524
522 525 for (sf = scsi_failover_table; sf->sf_mod; sf++) {
523 526 if ((sf->sf_mod == NULL) || (sf->sf_sfo == NULL))
524 527 continue;
525 528 (void) ddi_modclose(sf->sf_mod);
526 529 sf->sf_mod = NULL;
527 530 sf->sf_sfo = NULL;
528 531 }
529 532
530 533 if (scsi_failover_table && scsi_nfailover)
531 534 kmem_free(scsi_failover_table,
532 535 sizeof (struct scsi_failover) * (scsi_nfailover + 1));
533 536 scsi_failover_table = NULL;
534 537 scsi_nfailover = 0;
535 538 }
536 539
537 540 /* ARGSUSED */
538 541 static int
539 542 vhci_open(dev_t *devp, int flag, int otype, cred_t *credp)
540 543 {
541 544 struct scsi_vhci *vhci;
542 545
543 546 if (otype != OTYP_CHR) {
544 547 return (EINVAL);
545 548 }
546 549
547 550 vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(*devp)));
548 551 if (vhci == NULL) {
549 552 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_open: failed ENXIO\n"));
550 553 return (ENXIO);
551 554 }
552 555
553 556 mutex_enter(&vhci->vhci_mutex);
554 557 if ((flag & FEXCL) && (vhci->vhci_state & VHCI_STATE_OPEN)) {
555 558 mutex_exit(&vhci->vhci_mutex);
556 559 vhci_log(CE_NOTE, vhci->vhci_dip,
557 560 "!vhci%d: Already open\n", getminor(*devp));
558 561 return (EBUSY);
559 562 }
560 563
561 564 vhci->vhci_state |= VHCI_STATE_OPEN;
562 565 mutex_exit(&vhci->vhci_mutex);
563 566 return (0);
564 567 }
565 568
566 569
567 570 /* ARGSUSED */
568 571 static int
569 572 vhci_close(dev_t dev, int flag, int otype, cred_t *credp)
570 573 {
571 574 struct scsi_vhci *vhci;
572 575
573 576 if (otype != OTYP_CHR) {
574 577 return (EINVAL);
575 578 }
576 579
577 580 vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
578 581 if (vhci == NULL) {
579 582 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_close: failed ENXIO\n"));
580 583 return (ENXIO);
581 584 }
582 585
583 586 mutex_enter(&vhci->vhci_mutex);
584 587 vhci->vhci_state &= ~VHCI_STATE_OPEN;
585 588 mutex_exit(&vhci->vhci_mutex);
586 589
587 590 return (0);
588 591 }
589 592
590 593 /* ARGSUSED */
591 594 static int
592 595 vhci_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
593 596 cred_t *credp, int *rval)
594 597 {
595 598 if (IS_DEVCTL(cmd)) {
596 599 return (vhci_devctl(dev, cmd, data, mode, credp, rval));
597 600 } else if (cmd == MP_CMD) {
598 601 return (vhci_mpapi_ctl(dev, cmd, data, mode, credp, rval));
599 602 } else {
600 603 return (vhci_ctl(dev, cmd, data, mode, credp, rval));
601 604 }
602 605 }
603 606
604 607 /*
605 608 * attach the module
606 609 */
607 610 static int
608 611 vhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
609 612 {
610 613 int rval = DDI_FAILURE;
611 614 int scsi_hba_attached = 0;
612 615 int vhci_attached = 0;
613 616 int mutex_initted = 0;
614 617 int instance;
615 618 struct scsi_vhci *vhci;
616 619 scsi_hba_tran_t *tran;
617 620 char cache_name_buf[64];
618 621 char *data;
619 622
620 623 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_attach: cmd=0x%x\n", cmd));
621 624
622 625 instance = ddi_get_instance(dip);
623 626
624 627 switch (cmd) {
625 628 case DDI_ATTACH:
626 629 break;
627 630
628 631 case DDI_RESUME:
629 632 case DDI_PM_RESUME:
630 633 		VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_attach: resume not yet "
631 634 "implemented\n"));
632 635 return (rval);
633 636
634 637 default:
635 638 VHCI_DEBUG(1, (CE_NOTE, NULL,
636 639 "!vhci_attach: unknown ddi command\n"));
637 640 return (rval);
638 641 }
639 642
640 643 /*
641 644 * Allocate vhci data structure.
642 645 */
643 646 if (ddi_soft_state_zalloc(vhci_softstate, instance) != DDI_SUCCESS) {
644 647 VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
645 648 "soft state alloc failed\n"));
646 649 return (DDI_FAILURE);
647 650 }
648 651
649 652 if ((vhci = ddi_get_soft_state(vhci_softstate, instance)) == NULL) {
650 653 VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
651 654 "bad soft state\n"));
652 655 ddi_soft_state_free(vhci_softstate, instance);
653 656 return (DDI_FAILURE);
654 657 }
655 658
656 659 /* Allocate packet cache */
657 660 (void) snprintf(cache_name_buf, sizeof (cache_name_buf),
658 661 "vhci%d_cache", instance);
659 662
660 663 mutex_init(&vhci->vhci_mutex, NULL, MUTEX_DRIVER, NULL);
661 664 mutex_initted++;
662 665
663 666 /*
664 667 * Allocate a transport structure
665 668 */
666 669 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
667 670 ASSERT(tran != NULL);
668 671
669 672 vhci->vhci_tran = tran;
670 673 vhci->vhci_dip = dip;
671 674 vhci->vhci_instance = instance;
672 675
673 676 tran->tran_hba_private = vhci;
674 677 tran->tran_tgt_init = vhci_scsi_tgt_init;
675 678 tran->tran_tgt_probe = NULL;
676 679 tran->tran_tgt_free = vhci_scsi_tgt_free;
677 680
678 681 tran->tran_start = vhci_scsi_start;
679 682 tran->tran_abort = vhci_scsi_abort;
680 683 tran->tran_reset = vhci_scsi_reset;
681 684 tran->tran_getcap = vhci_scsi_getcap;
682 685 tran->tran_setcap = vhci_scsi_setcap;
683 686 tran->tran_init_pkt = vhci_scsi_init_pkt;
684 687 tran->tran_destroy_pkt = vhci_scsi_destroy_pkt;
685 688 tran->tran_dmafree = vhci_scsi_dmafree;
686 689 tran->tran_sync_pkt = vhci_scsi_sync_pkt;
687 690 tran->tran_reset_notify = vhci_scsi_reset_notify;
688 691
689 692 tran->tran_get_bus_addr = vhci_scsi_get_bus_addr;
690 693 tran->tran_get_name = vhci_scsi_get_name;
691 694 tran->tran_bus_reset = NULL;
692 695 tran->tran_quiesce = NULL;
693 696 tran->tran_unquiesce = NULL;
694 697
695 698 /*
696 699 * register event notification routines with scsa
697 700 */
698 701 tran->tran_get_eventcookie = NULL;
699 702 tran->tran_add_eventcall = NULL;
700 703 tran->tran_remove_eventcall = NULL;
701 704 tran->tran_post_event = NULL;
702 705
703 706 tran->tran_bus_power = vhci_scsi_bus_power;
704 707
705 708 tran->tran_bus_config = vhci_scsi_bus_config;
706 709 tran->tran_bus_unconfig = vhci_scsi_bus_unconfig;
707 710
708 711 /*
709 712 * Attach this instance with the mpxio framework
710 713 */
711 714 if (mdi_vhci_register(MDI_HCI_CLASS_SCSI, dip, &vhci_opinfo, 0)
712 715 != MDI_SUCCESS) {
713 716 VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
714 717 "mdi_vhci_register failed\n"));
715 718 goto attach_fail;
716 719 }
717 720 vhci_attached++;
718 721
719 722 /*
720 723 * Attach this instance of the hba.
721 724 *
722 725 * Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA
723 726 * driver, it has nothing to do with DMA. However, when calling
724 727 * scsi_hba_attach_setup() we need to pass something valid in the
725 728 * dma attributes parameter. So we just use scsi_alloc_attr.
726 729 * SCSA itself seems to care only for dma_attr_minxfer and
727 730 * dma_attr_burstsizes fields of dma attributes structure.
728 731 	 * It expects those fields to be non-zero.
729 732 */
730 733 if (scsi_hba_attach_setup(dip, &scsi_alloc_attr, tran,
731 734 SCSI_HBA_ADDR_COMPLEX) != DDI_SUCCESS) {
732 735 VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
733 736 "hba attach failed\n"));
734 737 goto attach_fail;
735 738 }
736 739 scsi_hba_attached++;
737 740
738 741 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
739 742 INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
740 743 VHCI_DEBUG(1, (CE_NOTE, dip, "!vhci_attach:"
741 744 " ddi_create_minor_node failed\n"));
742 745 goto attach_fail;
743 746 }
744 747
745 748 /*
746 749 * Set pm-want-child-notification property for
747 750 * power management of the phci and client
748 751 */
749 752 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
750 753 "pm-want-child-notification?", NULL, NULL) != DDI_PROP_SUCCESS) {
751 754 cmn_err(CE_WARN,
752 755 "%s%d fail to create pm-want-child-notification? prop",
753 756 ddi_driver_name(dip), ddi_get_instance(dip));
754 757 goto attach_fail;
755 758 }
756 759
757 760 vhci->vhci_taskq = taskq_create("vhci_taskq", 1, MINCLSYSPRI, 1, 4, 0);
758 761 vhci->vhci_update_pathstates_taskq =
759 762 taskq_create("vhci_update_pathstates", VHCI_NUM_UPDATE_TASKQ,
760 763 MINCLSYSPRI, 1, 4, 0);
761 764 ASSERT(vhci->vhci_taskq);
762 765 ASSERT(vhci->vhci_update_pathstates_taskq);
763 766
764 767 /*
765 768 * Set appropriate configuration flags based on options set in
766 769 * conf file.
767 770 */
768 771 vhci->vhci_conf_flags = 0;
769 772 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, PROPFLAGS,
770 773 "auto-failback", &data) == DDI_SUCCESS) {
771 774 if (strcmp(data, "enable") == 0)
772 775 vhci->vhci_conf_flags |= VHCI_CONF_FLAGS_AUTO_FAILBACK;
773 776 ddi_prop_free(data);
774 777 }
775 778
776 779 if (!(vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK))
777 780 vhci_log(CE_NOTE, dip, "!Auto-failback capability "
778 781 "disabled through scsi_vhci.conf file.");
779 782
780 783 /*
781 784 * Allocate an mpapi private structure
782 785 */
783 786 vhci->mp_priv = kmem_zalloc(sizeof (mpapi_priv_t), KM_SLEEP);
784 787 if (vhci_mpapi_init(vhci) != 0) {
785 788 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_attach: "
786 789 "vhci_mpapi_init() failed"));
787 790 }
788 791
789 792 vhci_failover_modopen(vhci); /* load failover modules */
790 793
791 794 ddi_report_dev(dip);
792 795 return (DDI_SUCCESS);
793 796
794 797 attach_fail:
795 798 if (vhci_attached)
796 799 (void) mdi_vhci_unregister(dip, 0);
797 800
798 801 if (scsi_hba_attached)
799 802 (void) scsi_hba_detach(dip);
800 803
801 804 if (vhci->vhci_tran)
802 805 scsi_hba_tran_free(vhci->vhci_tran);
803 806
804 807 if (mutex_initted) {
805 808 mutex_destroy(&vhci->vhci_mutex);
806 809 }
807 810
808 811 ddi_soft_state_free(vhci_softstate, instance);
809 812 return (DDI_FAILURE);
810 813 }
811 814
812 815
813 816 /*ARGSUSED*/
814 817 static int
815 818 vhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
816 819 {
817 820 int instance = ddi_get_instance(dip);
818 821 scsi_hba_tran_t *tran;
819 822 struct scsi_vhci *vhci;
820 823
821 824 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_detach: cmd=0x%x\n", cmd));
822 825
823 826 if ((tran = ddi_get_driver_private(dip)) == NULL)
824 827 return (DDI_FAILURE);
825 828
826 829 vhci = TRAN2HBAPRIVATE(tran);
827 830 if (!vhci) {
828 831 return (DDI_FAILURE);
829 832 }
830 833
831 834 switch (cmd) {
832 835 case DDI_DETACH:
833 836 break;
834 837
835 838 case DDI_SUSPEND:
836 839 case DDI_PM_SUSPEND:
837 840 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_detach: suspend/pm not yet"
838 841 "implemented\n"));
839 842 return (DDI_FAILURE);
840 843
841 844 default:
842 845 VHCI_DEBUG(1, (CE_NOTE, NULL,
843 846 "!vhci_detach: unknown ddi command\n"));
844 847 return (DDI_FAILURE);
845 848 }
846 849
847 850 (void) mdi_vhci_unregister(dip, 0);
848 851 (void) scsi_hba_detach(dip);
849 852 scsi_hba_tran_free(tran);
850 853
851 854 if (ddi_prop_remove(DDI_DEV_T_NONE, dip,
852 855 "pm-want-child-notification?") != DDI_PROP_SUCCESS) {
853 856 cmn_err(CE_WARN,
854 857 "%s%d unable to remove prop pm-want_child_notification?",
855 858 ddi_driver_name(dip), ddi_get_instance(dip));
856 859 }
857 860 if (vhci_restart_timeid != 0) {
858 861 (void) untimeout(vhci_restart_timeid);
859 862 }
860 863 vhci_restart_timeid = 0;
861 864
862 865 mutex_destroy(&vhci->vhci_mutex);
863 866 vhci->vhci_dip = NULL;
864 867 vhci->vhci_tran = NULL;
865 868 taskq_destroy(vhci->vhci_taskq);
866 869 taskq_destroy(vhci->vhci_update_pathstates_taskq);
867 870 ddi_remove_minor_node(dip, NULL);
868 871 ddi_soft_state_free(vhci_softstate, instance);
869 872
870 873 vhci_failover_modclose(); /* unload failover modules */
871 874 return (DDI_SUCCESS);
872 875 }
873 876
874 877 /*
875 878 * vhci_getinfo()
876 879 * Given the device number, return the devinfo pointer or the
877 880 * instance number.
878 881 * Note: always succeed DDI_INFO_DEVT2INSTANCE, even before attach.
879 882 */
880 883
881 884 /*ARGSUSED*/
882 885 static int
883 886 vhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
884 887 {
885 888 struct scsi_vhci *vhcip;
886 889 int instance = MINOR2INST(getminor((dev_t)arg));
887 890
888 891 switch (cmd) {
889 892 case DDI_INFO_DEVT2DEVINFO:
890 893 vhcip = ddi_get_soft_state(vhci_softstate, instance);
891 894 if (vhcip != NULL)
892 895 *result = vhcip->vhci_dip;
893 896 else {
894 897 *result = NULL;
895 898 return (DDI_FAILURE);
896 899 }
897 900 break;
898 901
899 902 case DDI_INFO_DEVT2INSTANCE:
900 903 *result = (void *)(uintptr_t)instance;
901 904 break;
902 905
903 906 default:
904 907 return (DDI_FAILURE);
905 908 }
906 909
907 910 return (DDI_SUCCESS);
908 911 }
909 912
910 913 /*ARGSUSED*/
911 914 static int
912 915 vhci_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
913 916 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
914 917 {
915 918 char *guid;
916 919 scsi_vhci_lun_t *vlun;
917 920 struct scsi_vhci *vhci;
918 921 clock_t from_ticks;
919 922 mdi_pathinfo_t *pip;
920 923 int rval;
921 924
922 925 ASSERT(hba_dip != NULL);
923 926 ASSERT(tgt_dip != NULL);
924 927
925 928 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
926 929 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
927 930 /*
928 931 * This must be the .conf node without GUID property.
929 932 * The node under fp already inserts a delay, so we
930 933 * just return from here. We rely on this delay to have
931 934 * all dips be posted to the ndi hotplug thread's newdev
932 935 * list. This is necessary for the deferred attach
933 936 * mechanism to work and opens() done soon after boot to
934 937 * succeed.
935 938 */
936 939 VHCI_DEBUG(4, (CE_WARN, hba_dip, "tgt_init: lun guid "
937 940 "property failed"));
938 941 return (DDI_NOT_WELL_FORMED);
939 942 }
940 943
941 944 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
942 945 /*
943 946 * This must be .conf node with the GUID property. We don't
944 947 * merge property by ndi_merge_node() here because the
945 948 	 * devi_addr_buf of .conf node is always "" according to the
946 949 * implementation of vhci_scsi_get_name_bus_addr().
947 950 */
948 951 ddi_set_name_addr(tgt_dip, NULL);
949 952 return (DDI_FAILURE);
950 953 }
951 954
952 955 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(hba_dip));
953 956 ASSERT(vhci != NULL);
954 957
955 958 VHCI_DEBUG(4, (CE_NOTE, hba_dip,
956 959 "!tgt_init: called for %s (instance %d)\n",
957 960 ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip)));
958 961
959 962 vlun = vhci_lun_lookup(tgt_dip);
960 963
961 964 mutex_enter(&vhci_global_mutex);
962 965
963 966 from_ticks = ddi_get_lbolt();
964 967 if (vhci_to_ticks == 0) {
965 968 vhci_to_ticks = from_ticks +
966 969 drv_usectohz(vhci_init_wait_timeout);
967 970 }
968 971
969 972 #if DEBUG
970 973 if (vlun) {
971 974 VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
972 975 "vhci_scsi_tgt_init: guid %s : found vlun 0x%p "
973 976 "from_ticks %lx to_ticks %lx",
974 977 guid, (void *)vlun, from_ticks, vhci_to_ticks));
975 978 } else {
976 979 VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
977 980 "vhci_scsi_tgt_init: guid %s : vlun not found "
978 981 "from_ticks %lx to_ticks %lx", guid, from_ticks,
979 982 vhci_to_ticks));
980 983 }
981 984 #endif
982 985
983 986 rval = mdi_select_path(tgt_dip, NULL,
984 987 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH), NULL, &pip);
985 988 if (rval == MDI_SUCCESS) {
986 989 mdi_rele_path(pip);
987 990 }
988 991
989 992 /*
990 993 * Wait for the following conditions :
991 994 * 1. no vlun available yet
992 995 * 2. no path established
993 996 * 3. timer did not expire
994 997 */
995 998 while ((vlun == NULL) || (mdi_client_get_path_count(tgt_dip) == 0) ||
996 999 (rval != MDI_SUCCESS)) {
997 1000 if (vlun && vlun->svl_not_supported) {
998 1001 VHCI_DEBUG(1, (CE_WARN, hba_dip, "tgt_init: "
999 1002 "vlun 0x%p lun guid %s not supported!",
1000 1003 (void *)vlun, guid));
1001 1004 mutex_exit(&vhci_global_mutex);
1002 1005 ddi_prop_free(guid);
1003 1006 return (DDI_NOT_WELL_FORMED);
1004 1007 }
1005 1008 if ((vhci_first_time == 0) && (from_ticks >= vhci_to_ticks)) {
1006 1009 vhci_first_time = 1;
1007 1010 }
1008 1011 if (vhci_first_time == 1) {
1009 1012 VHCI_DEBUG(1, (CE_WARN, hba_dip, "vhci_scsi_tgt_init: "
1010 1013 "no wait for %s. from_tick %lx, to_tick %lx",
1011 1014 guid, from_ticks, vhci_to_ticks));
1012 1015 mutex_exit(&vhci_global_mutex);
1013 1016 ddi_prop_free(guid);
1014 1017 return (DDI_NOT_WELL_FORMED);
1015 1018 }
1016 1019
1017 1020 if (cv_timedwait(&vhci_cv,
1018 1021 &vhci_global_mutex, vhci_to_ticks) == -1) {
1019 1022 /* Timed out */
1020 1023 #ifdef DEBUG
1021 1024 if (vlun == NULL) {
1022 1025 VHCI_DEBUG(1, (CE_WARN, hba_dip,
1023 1026 "tgt_init: no vlun for %s!", guid));
1024 1027 } else if (mdi_client_get_path_count(tgt_dip) == 0) {
1025 1028 VHCI_DEBUG(1, (CE_WARN, hba_dip,
1026 1029 "tgt_init: client path count is "
1027 1030 "zero for %s!", guid));
1028 1031 } else {
1029 1032 VHCI_DEBUG(1, (CE_WARN, hba_dip,
1030 1033 "tgt_init: client path not "
1031 1034 "available yet for %s!", guid));
1032 1035 }
1033 1036 #endif /* DEBUG */
1034 1037 mutex_exit(&vhci_global_mutex);
1035 1038 ddi_prop_free(guid);
1036 1039 return (DDI_NOT_WELL_FORMED);
1037 1040 }
1038 1041 vlun = vhci_lun_lookup(tgt_dip);
1039 1042 rval = mdi_select_path(tgt_dip, NULL,
1040 1043 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
1041 1044 NULL, &pip);
1042 1045 if (rval == MDI_SUCCESS) {
1043 1046 mdi_rele_path(pip);
1044 1047 }
1045 1048 from_ticks = ddi_get_lbolt();
1046 1049 }
1047 1050 mutex_exit(&vhci_global_mutex);
1048 1051
1049 1052 ASSERT(vlun != NULL);
1050 1053 ddi_prop_free(guid);
1051 1054
1052 1055 scsi_device_hba_private_set(sd, vlun);
1053 1056
1054 1057 return (DDI_SUCCESS);
1055 1058 }
1056 1059
1057 1060 /*ARGSUSED*/
1058 1061 static void
1059 1062 vhci_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1060 1063 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1061 1064 {
1062 1065 struct scsi_vhci_lun *dvlp;
1063 1066 ASSERT(mdi_client_get_path_count(tgt_dip) <= 0);
1064 1067 dvlp = (struct scsi_vhci_lun *)scsi_device_hba_private_get(sd);
1065 1068 ASSERT(dvlp != NULL);
1066 1069
1067 1070 vhci_lun_free(dvlp, sd);
1068 1071 }
1069 1072
1070 1073 /*
1071 1074 * a PGR register command has started; copy the info we need
1072 1075 */
1073 1076 int
1074 1077 vhci_pgr_register_start(scsi_vhci_lun_t *vlun, struct scsi_pkt *pkt)
1075 1078 {
1076 1079 struct vhci_pkt *vpkt = TGTPKT2VHCIPKT(pkt);
1077 1080 void *addr;
1078 1081
1079 1082 if (!vpkt->vpkt_tgt_init_bp)
1080 1083 return (TRAN_BADPKT);
1081 1084
1082 1085 addr = bp_mapin_common(vpkt->vpkt_tgt_init_bp,
1083 1086 (vpkt->vpkt_flags & CFLAG_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);
1084 1087 if (addr == NULL)
1085 1088 return (TRAN_BUSY);
1086 1089
1087 1090 mutex_enter(&vlun->svl_mutex);
1088 1091
1089 1092 vhci_print_prout_keys(vlun, "v_pgr_reg_start: before bcopy:");
1090 1093
1091 1094 bcopy(addr, &vlun->svl_prout, sizeof (vhci_prout_t) -
1092 1095 (2 * MHIOC_RESV_KEY_SIZE*sizeof (char)));
1093 1096 bcopy(pkt->pkt_cdbp, vlun->svl_cdb, sizeof (vlun->svl_cdb));
1094 1097
1095 1098 vhci_print_prout_keys(vlun, "v_pgr_reg_start: after bcopy:");
1096 1099
1097 1100 vlun->svl_time = pkt->pkt_time;
1098 1101 vlun->svl_bcount = vpkt->vpkt_tgt_init_bp->b_bcount;
1099 1102 vlun->svl_first_path = vpkt->vpkt_path;
1100 1103 mutex_exit(&vlun->svl_mutex);
1101 1104 return (0);
1102 1105 }
1103 1106
1104 1107 /*
1105 1108 * Function name : vhci_scsi_start()
1106 1109 *
1107 1110 * Return Values : TRAN_FATAL_ERROR - vhci has been shutdown
1108 1111 * or other fatal failure
1109 1112 * preventing packet transportation
1110 1113 * TRAN_BUSY - request queue is full
1111 1114 * TRAN_ACCEPT - pkt has been submitted to phci
1112 1115 * (or is held in the waitQ)
1113 1116 * Description : Implements SCSA's tran_start() entry point for
1114 1117 * packet transport
1115 1118 *
1116 1119 */
1117 1120 static int
1118 1121 vhci_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1119 1122 {
1120 1123 int rval = TRAN_ACCEPT;
1121 1124 int instance, held;
1122 1125 struct scsi_vhci *vhci = ADDR2VHCI(ap);
1123 1126 struct scsi_vhci_lun *vlun = ADDR2VLUN(ap);
1124 1127 struct vhci_pkt *vpkt = TGTPKT2VHCIPKT(pkt);
1125 1128 int flags = 0;
1126 1129 scsi_vhci_priv_t *svp, *svp_resrv;
1127 1130 dev_info_t *cdip;
1128 1131 client_lb_t lbp;
1129 1132 int restore_lbp = 0;
1130 1133 /* set if pkt is SCSI-II RESERVE cmd */
1131 1134 int pkt_reserve_cmd = 0;
1132 1135 int reserve_failed = 0;
1133 1136 int resrv_instance = 0;
1134 1137 mdi_pathinfo_t *pip;
1135 1138 struct scsi_pkt *rel_pkt;
1136 1139
1137 1140 ASSERT(vhci != NULL);
1138 1141 ASSERT(vpkt != NULL);
1139 1142 ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
1140 1143 cdip = ADDR2DIP(ap);
1141 1144
1142 1145 /*
1143 1146 * Block IOs if LUN is held or QUIESCED for IOs.
1144 1147 */
1145 1148 if ((VHCI_LUN_IS_HELD(vlun)) ||
1146 1149 ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
1147 1150 return (TRAN_BUSY);
1148 1151 }
1149 1152
1150 1153 /*
1151 1154 * vhci_lun needs to be quiesced before SCSI-II RESERVE command
1152 1155 * can be issued. This may require a cv_timedwait, which is
1153 1156 * dangerous to perform in an interrupt context. So if this
1154 1157 * is a RESERVE command a taskq is dispatched to service it.
1155 1158 * This taskq shall again call vhci_scsi_start, but we shall be
1156 1159 	 * sure it's not in an interrupt context.
1157 1160 */
1158 1161 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
1159 1162 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
1160 1163 if (!(vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ)) {
1161 1164 if (taskq_dispatch(vhci->vhci_taskq,
1162 1165 vhci_dispatch_scsi_start, (void *) vpkt,
1163 1166 KM_NOSLEEP)) {
1164 1167 return (TRAN_ACCEPT);
1165 1168 } else {
1166 1169 return (TRAN_BUSY);
1167 1170 }
1168 1171 }
1169 1172
1170 1173 /*
1171 1174 * Here we ensure that simultaneous SCSI-II RESERVE cmds don't
1172 1175 * get serviced for a lun.
1173 1176 */
1174 1177 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
1175 1178 if (!held) {
1176 1179 return (TRAN_BUSY);
1177 1180 } else if ((vlun->svl_flags & VLUN_QUIESCED_FLG) ==
1178 1181 VLUN_QUIESCED_FLG) {
1179 1182 VHCI_RELEASE_LUN(vlun);
1180 1183 return (TRAN_BUSY);
1181 1184 }
1182 1185
1183 1186 /*
1184 1187 * To ensure that no IOs occur for this LUN for the duration
1185 1188 * of this pkt set the VLUN_QUIESCED_FLG.
1186 1189 * In case this routine needs to exit on error make sure that
1187 1190 * this flag is cleared.
1188 1191 */
1189 1192 vlun->svl_flags |= VLUN_QUIESCED_FLG;
1190 1193 pkt_reserve_cmd = 1;
1191 1194
1192 1195 /*
1193 1196 * if this is a SCSI-II RESERVE command, set load balancing
1194 1197 * policy to be ALTERNATE PATH to ensure that all subsequent
1195 1198 * IOs are routed on the same path. This is because if commands
1196 1199 * are routed across multiple paths then IOs on paths other than
1197 1200 * the one on which the RESERVE was executed will get a
1198 1201 * RESERVATION CONFLICT
1199 1202 */
1200 1203 lbp = mdi_get_lb_policy(cdip);
1201 1204 if (lbp != LOAD_BALANCE_NONE) {
1202 1205 if (vhci_quiesce_lun(vlun) != 1) {
1203 1206 vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1204 1207 VHCI_RELEASE_LUN(vlun);
1205 1208 return (TRAN_FATAL_ERROR);
1206 1209 }
1207 1210 vlun->svl_lb_policy_save = lbp;
1208 1211 if (mdi_set_lb_policy(cdip, LOAD_BALANCE_NONE) !=
1209 1212 MDI_SUCCESS) {
1210 1213 vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1211 1214 VHCI_RELEASE_LUN(vlun);
1212 1215 return (TRAN_FATAL_ERROR);
1213 1216 }
1214 1217 restore_lbp = 1;
1215 1218 }
1216 1219
1217 1220 VHCI_DEBUG(2, (CE_NOTE, vhci->vhci_dip,
1218 1221 "!vhci_scsi_start: sending SCSI-2 RESERVE, vlun 0x%p, "
1219 1222 "svl_resrv_pip 0x%p, svl_flags: %x, lb_policy %x",
1220 1223 (void *)vlun, (void *)vlun->svl_resrv_pip, vlun->svl_flags,
1221 1224 mdi_get_lb_policy(cdip)));
1222 1225
1223 1226 /*
1224 1227 * See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h
1225 1228 * To narrow this window where a reserve command may be sent
1226 1229 * down an inactive path the path states first need to be
1227 1230 * updated. Before calling vhci_update_pathstates reset
1228 1231 * VLUN_RESERVE_ACTIVE_FLG, just in case it was already set
1229 1232 * for this lun. This shall prevent an unnecessary reset
1230 1233 * from being sent out. Also remember currently reserved path
1231 1234 * just for a case the new reservation will go to another path.
1232 1235 */
1233 1236 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
1234 1237 resrv_instance = mdi_pi_get_path_instance(
1235 1238 vlun->svl_resrv_pip);
1236 1239 }
1237 1240 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
1238 1241 vhci_update_pathstates((void *)vlun);
1239 1242 }
1240 1243
1241 1244 instance = ddi_get_instance(vhci->vhci_dip);
1242 1245
1243 1246 /*
1244 1247 * If the command is PRIN with action of zero, then the cmd
1245 1248 * is reading PR keys which requires filtering on completion.
1246 1249 * Data cache sync must be guaranteed.
1247 1250 */
1248 1251 if ((pkt->pkt_cdbp[0] == SCMD_PRIN) && (pkt->pkt_cdbp[1] == 0) &&
1249 1252 (vpkt->vpkt_org_vpkt == NULL)) {
1250 1253 vpkt->vpkt_tgt_init_pkt_flags |= PKT_CONSISTENT;
1251 1254 }
1252 1255
1253 1256 /*
1254 1257 * Do not defer bind for PKT_DMA_PARTIAL
1255 1258 */
1256 1259 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1257 1260
1258 1261 /* This is a non pkt_dma_partial case */
1259 1262 if ((rval = vhci_bind_transport(
1260 1263 ap, vpkt, vpkt->vpkt_tgt_init_pkt_flags, NULL_FUNC))
1261 1264 != TRAN_ACCEPT) {
1262 1265 VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1263 1266 "!vhci%d %x: failed to bind transport: "
1264 1267 "vlun 0x%p pkt_reserved %x restore_lbp %x,"
1265 1268 "lbp %x", instance, rval, (void *)vlun,
1266 1269 pkt_reserve_cmd, restore_lbp, lbp));
1267 1270 if (restore_lbp)
1268 1271 (void) mdi_set_lb_policy(cdip, lbp);
1269 1272 if (pkt_reserve_cmd)
1270 1273 vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1271 1274 return (rval);
1272 1275 }
1273 1276 VHCI_DEBUG(8, (CE_NOTE, NULL,
1274 1277 "vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt));
1275 1278 }
1276 1279 ASSERT(vpkt->vpkt_hba_pkt != NULL);
1277 1280 ASSERT(vpkt->vpkt_path != NULL);
1278 1281
1279 1282 /*
1280 1283 * This is the chance to adjust the pHCI's pkt and other information
1281 1284 * from target driver's pkt.
1282 1285 */
1283 1286 VHCI_DEBUG(8, (CE_NOTE, vhci->vhci_dip, "vhci_scsi_start vpkt %p\n",
1284 1287 (void *)vpkt));
1285 1288 vhci_update_pHCI_pkt(vpkt, pkt);
1286 1289
1287 1290 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
1288 1291 if (vpkt->vpkt_path != vlun->svl_resrv_pip) {
1289 1292 VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
1290 1293 "!vhci_bind: reserve flag set for vlun 0x%p, but, "
1291 1294 "pktpath 0x%p resrv path 0x%p differ. lb_policy %x",
1292 1295 (void *)vlun, (void *)vpkt->vpkt_path,
1293 1296 (void *)vlun->svl_resrv_pip,
1294 1297 mdi_get_lb_policy(cdip)));
1295 1298 reserve_failed = 1;
1296 1299 }
1297 1300 }
1298 1301
1299 1302 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
1300 1303 if (svp == NULL || reserve_failed) {
1301 1304 if (pkt_reserve_cmd) {
1302 1305 VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1303 1306 "!vhci_bind returned null svp vlun 0x%p",
1304 1307 (void *)vlun));
1305 1308 vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1306 1309 if (restore_lbp)
1307 1310 (void) mdi_set_lb_policy(cdip, lbp);
1308 1311 }
1309 1312 pkt_cleanup:
1310 1313 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1311 1314 scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1312 1315 vpkt->vpkt_hba_pkt = NULL;
1313 1316 if (vpkt->vpkt_path) {
1314 1317 mdi_rele_path(vpkt->vpkt_path);
1315 1318 vpkt->vpkt_path = NULL;
1316 1319 }
1317 1320 }
1318 1321 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
1319 1322 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
1320 1323 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
1321 1324 sema_v(&vlun->svl_pgr_sema);
1322 1325 }
1323 1326 return (TRAN_BUSY);
1324 1327 }
1325 1328
1326 1329 if ((resrv_instance != 0) && (resrv_instance !=
1327 1330 mdi_pi_get_path_instance(vpkt->vpkt_path))) {
1328 1331 /*
1329 1332 * This is an attempt to reserve vpkt->vpkt_path. But the
1330 1333 * previously reserved path referred by resrv_instance might
1331 1334 * still be reserved. Hence we will send a release command
1332 1335 * there in order to avoid a reservation conflict.
1333 1336 */
1334 1337 VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip, "!vhci_scsi_start: "
1335 1338 "conflicting reservation on another path, vlun 0x%p, "
1336 1339 "reserved instance %d, new instance: %d, pip: 0x%p",
1337 1340 (void *)vlun, resrv_instance,
1338 1341 mdi_pi_get_path_instance(vpkt->vpkt_path),
1339 1342 (void *)vpkt->vpkt_path));
1340 1343
1341 1344 /*
1342 1345 * In rare cases, the path referred by resrv_instance could
1343 1346 * disappear in the meantime. Calling mdi_select_path() below
1344 1347 * is an attempt to find out if the path still exists. It also
1345 1348 * ensures that the path will be held when the release is sent.
1346 1349 */
1347 1350 rval = mdi_select_path(cdip, NULL, MDI_SELECT_PATH_INSTANCE,
1348 1351 (void *)(intptr_t)resrv_instance, &pip);
1349 1352
1350 1353 if ((rval == MDI_SUCCESS) && (pip != NULL)) {
1351 1354 svp_resrv = (scsi_vhci_priv_t *)
1352 1355 mdi_pi_get_vhci_private(pip);
1353 1356 rel_pkt = scsi_init_pkt(&svp_resrv->svp_psd->sd_address,
1354 1357 NULL, NULL, CDB_GROUP0,
1355 1358 sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC,
1356 1359 NULL);
1357 1360
1358 1361 if (rel_pkt == NULL) {
1359 1362 char *p_path;
1360 1363
1361 1364 /*
1362 1365 * This is very unlikely.
1363 1366 * scsi_init_pkt(SLEEP_FUNC) does not fail
1364 1367 * because of resources. But in theory it could
1365 1368 * fail for some other reason. There is not an
1366 1369 * easy way how to recover though. Log a warning
1367 1370 * and return.
1368 1371 */
1369 1372 p_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
1370 1373 vhci_log(CE_WARN, vhci->vhci_dip, "!Sending "
1371 1374 "RELEASE(6) to %s failed, a potential "
1372 1375 "reservation conflict ahead.",
1373 1376 ddi_pathname(mdi_pi_get_phci(pip), p_path));
1374 1377 kmem_free(p_path, MAXPATHLEN);
1375 1378
1376 1379 if (restore_lbp)
1377 1380 (void) mdi_set_lb_policy(cdip, lbp);
1378 1381
1379 1382 /* no need to check pkt_reserve_cmd here */
1380 1383 vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1381 1384 return (TRAN_FATAL_ERROR);
1382 1385 }
1383 1386
1384 1387 rel_pkt->pkt_cdbp[0] = SCMD_RELEASE;
1385 1388 rel_pkt->pkt_time = 60;
1386 1389
1387 1390 /*
1388 1391 * Ignore the return value. If it will fail
1389 1392 * then most likely it is no longer reserved
1390 1393 * anyway.
1391 1394 */
1392 1395 (void) vhci_do_scsi_cmd(rel_pkt);
1393 1396 VHCI_DEBUG(1, (CE_NOTE, NULL,
1394 1397 "!vhci_scsi_start: path 0x%p, issued SCSI-2"
1395 1398 " RELEASE\n", (void *)pip));
1396 1399 scsi_destroy_pkt(rel_pkt);
1397 1400 mdi_rele_path(pip);
1398 1401 }
1399 1402 }
1400 1403
1401 1404 VHCI_INCR_PATH_CMDCOUNT(svp);
1402 1405
1403 1406 /*
1404 1407 * Ensure that no other IOs raced ahead, while a RESERVE cmd was
1405 1408 * QUIESCING the same lun.
1406 1409 */
1407 1410 if ((!pkt_reserve_cmd) &&
1408 1411 ((vlun->svl_flags & VLUN_QUIESCED_FLG) == VLUN_QUIESCED_FLG)) {
1409 1412 VHCI_DECR_PATH_CMDCOUNT(svp);
1410 1413 goto pkt_cleanup;
1411 1414 }
1412 1415
1413 1416 if ((pkt->pkt_cdbp[0] == SCMD_PRIN) ||
1414 1417 (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
1415 1418 /*
1416 1419 * currently this thread only handles running PGR
1417 1420 * commands, so don't bother creating it unless
1418 1421 * something interesting is going to happen (like
1419 1422 * either a PGR out, or a PGR in with enough space
1420 1423 * to hold the keys that are getting returned)
1421 1424 */
1422 1425 mutex_enter(&vlun->svl_mutex);
1423 1426 if (((vlun->svl_flags & VLUN_TASK_D_ALIVE_FLG) == 0) &&
1424 1427 (pkt->pkt_cdbp[0] == SCMD_PROUT)) {
1425 1428 vlun->svl_taskq = taskq_create("vlun_pgr_task_daemon",
1426 1429 1, MINCLSYSPRI, 1, 4, 0);
1427 1430 vlun->svl_flags |= VLUN_TASK_D_ALIVE_FLG;
1428 1431 }
1429 1432 mutex_exit(&vlun->svl_mutex);
1430 1433 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
1431 1434 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
1432 1435 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
1433 1436 if (rval = vhci_pgr_register_start(vlun, pkt)) {
1434 1437 /* an error */
1435 1438 sema_v(&vlun->svl_pgr_sema);
1436 1439 return (rval);
1437 1440 }
1438 1441 }
1439 1442 }
1440 1443
1441 1444 /*
1442 1445 * SCSI-II RESERVE cmd is not expected in polled mode.
1443 1446 * If this changes it needs to be handled for the polled scenario.
1444 1447 */
1445 1448 flags = vpkt->vpkt_hba_pkt->pkt_flags;
1446 1449
1447 1450 /*
1448 1451 * Set the path_instance *before* sending the scsi_pkt down the path
1449 1452 * to mpxio's pHCI so that additional path abstractions at a pHCI
1450 1453 * level (like maybe iSCSI at some point in the future) can update
1451 1454 * the path_instance.
1452 1455 */
1453 1456 if (scsi_pkt_allocated_correctly(vpkt->vpkt_hba_pkt))
1454 1457 vpkt->vpkt_hba_pkt->pkt_path_instance =
1455 1458 mdi_pi_get_path_instance(vpkt->vpkt_path);
1456 1459
1457 1460 rval = scsi_transport(vpkt->vpkt_hba_pkt);
1458 1461 if (rval == TRAN_ACCEPT) {
1459 1462 if (flags & FLAG_NOINTR) {
1460 1463 struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
1461 1464 struct scsi_pkt *pkt = vpkt->vpkt_hba_pkt;
1462 1465
1463 1466 ASSERT(tpkt != NULL);
1464 1467 *(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
1465 1468 tpkt->pkt_resid = pkt->pkt_resid;
1466 1469 tpkt->pkt_state = pkt->pkt_state;
1467 1470 tpkt->pkt_statistics = pkt->pkt_statistics;
1468 1471 tpkt->pkt_reason = pkt->pkt_reason;
1469 1472
1470 1473 if ((*(pkt->pkt_scbp) == STATUS_CHECK) &&
1471 1474 (pkt->pkt_state & STATE_ARQ_DONE)) {
1472 1475 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
1473 1476 vpkt->vpkt_tgt_init_scblen);
1474 1477 }
1475 1478
1476 1479 VHCI_DECR_PATH_CMDCOUNT(svp);
1477 1480 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1478 1481 scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1479 1482 vpkt->vpkt_hba_pkt = NULL;
1480 1483 if (vpkt->vpkt_path) {
1481 1484 mdi_rele_path(vpkt->vpkt_path);
1482 1485 vpkt->vpkt_path = NULL;
1483 1486 }
1484 1487 }
1485 1488 /*
1486 1489 * This path will not automatically retry pkts
1487 1490 * internally, therefore, vpkt_org_vpkt should
1488 1491 * never be set.
1489 1492 */
1490 1493 ASSERT(vpkt->vpkt_org_vpkt == NULL);
1491 1494 scsi_hba_pkt_comp(tpkt);
1492 1495 }
1493 1496 return (rval);
1494 1497 } else if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
1495 1498 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
1496 1499 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
1497 1500 /* the command exited with bad status */
1498 1501 sema_v(&vlun->svl_pgr_sema);
1499 1502 } else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
1500 1503 /* the command exited with bad status */
1501 1504 sema_v(&vlun->svl_pgr_sema);
1502 1505 } else if (pkt_reserve_cmd) {
1503 1506 VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1504 1507 "!vhci_scsi_start: reserve failed vlun 0x%p",
1505 1508 (void *)vlun));
1506 1509 vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
1507 1510 if (restore_lbp)
1508 1511 (void) mdi_set_lb_policy(cdip, lbp);
1509 1512 }
1510 1513
1511 1514 ASSERT(vpkt->vpkt_hba_pkt != NULL);
1512 1515 VHCI_DECR_PATH_CMDCOUNT(svp);
1513 1516
1514 1517 /* Do not destroy phci packet information for PKT_DMA_PARTIAL */
1515 1518 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
1516 1519 scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
1517 1520 vpkt->vpkt_hba_pkt = NULL;
1518 1521 if (vpkt->vpkt_path) {
1519 1522 MDI_PI_ERRSTAT(vpkt->vpkt_path, MDI_PI_TRANSERR);
1520 1523 mdi_rele_path(vpkt->vpkt_path);
1521 1524 vpkt->vpkt_path = NULL;
1522 1525 }
1523 1526 }
1524 1527 return (TRAN_BUSY);
1525 1528 }
1526 1529
1527 1530 /*
1528 1531 * Function name : vhci_scsi_reset()
1529 1532 *
1530 1533 * Return Values : 0 - reset failed
1531 1534 * 1 - reset succeeded
1532 1535 */
1533 1536
1534 1537 /* ARGSUSED */
1535 1538 static int
1536 1539 vhci_scsi_reset(struct scsi_address *ap, int level)
1537 1540 {
1538 1541 int rval = 0;
1539 1542
1540 1543 cmn_err(CE_WARN, "!vhci_scsi_reset 0x%x", level);
1541 1544 if ((level == RESET_TARGET) || (level == RESET_LUN)) {
1542 1545 return (vhci_scsi_reset_target(ap, level, TRUE));
1543 1546 } else if (level == RESET_ALL) {
1544 1547 return (vhci_scsi_reset_bus(ap));
1545 1548 }
1546 1549
1547 1550 return (rval);
1548 1551 }
1549 1552
1550 1553 /*
1551 1554 * vhci_recovery_reset:
1552 1555 * Issues reset to the device
1553 1556 * Input:
1554 1557 * vlun - vhci lun pointer of the device
1555 1558 * ap - address of the device
1556 1559 * select_path:
1557 1560 * If select_path is FALSE, then the address specified in ap is
1558 1561 * the path on which reset will be issued.
1559 1562 * If select_path is TRUE, then path is obtained by calling
1560 1563 * mdi_select_path.
1561 1564 *
1562 1565 * recovery_depth:
1563 1566 * Caller can specify the level of reset.
1564 1567 * VHCI_DEPTH_LUN -
1565 1568 * Issues LUN RESET if device supports lun reset.
1566 1569 * VHCI_DEPTH_TARGET -
1567 1570 * If Lun Reset fails or the device does not support
1568 1571 * Lun Reset, issues TARGET RESET
1569 1572 * VHCI_DEPTH_ALL -
1570 1573 * If Lun Reset fails or the device does not support
1571 1574 * Lun Reset, issues TARGET RESET.
1572 1575 * If TARGET RESET does not succeed, issues Bus Reset.
1573 1576 */
1574 1577
1575 1578 static int
1576 1579 vhci_recovery_reset(scsi_vhci_lun_t *vlun, struct scsi_address *ap,
1577 1580 uint8_t select_path, uint8_t recovery_depth)
1578 1581 {
1579 1582 int ret = 0;
1580 1583
1581 1584 ASSERT(ap != NULL);
1582 1585
1583 1586 if (vlun && vlun->svl_support_lun_reset == 1) {
1584 1587 ret = vhci_scsi_reset_target(ap, RESET_LUN,
1585 1588 select_path);
1586 1589 }
1587 1590
1588 1591 recovery_depth--;
1589 1592
1590 1593 if ((ret == 0) && recovery_depth) {
1591 1594 ret = vhci_scsi_reset_target(ap, RESET_TARGET,
1592 1595 select_path);
1593 1596 recovery_depth--;
1594 1597 }
1595 1598
1596 1599 if ((ret == 0) && recovery_depth) {
1597 1600 (void) scsi_reset(ap, RESET_ALL);
1598 1601 }
1599 1602
1600 1603 return (ret);
1601 1604 }
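
The decrement walk above implies an ordering on the VHCI_DEPTH_* constants (VHCI_DEPTH_LUN < VHCI_DEPTH_TARGET < VHCI_DEPTH_ALL, i.e. 1, 2 and 3, for the two recovery_depth-- steps to line up with the comment). A minimal caller sketch, with illustrative vlun/sd variable names that are not taken from this file:

    /* ask for the full escalation: LUN, then target, then bus reset */
    if (vhci_recovery_reset(vlun, &sd->sd_address,
        TRUE, VHCI_DEPTH_ALL) == 0) {
            /* LUN and target resets failed; RESET_ALL was attempted too */
    }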
1602 1605
1603 1606 /*
1604 1607 * Note: The scsi_address passed to this routine could be the scsi_address
1605 1608 * for the virtual device or the physical device. No assumptions should be
1606 1609 * made in this routine about the contents of the ap structure.
1607 1610 * Further, note that the child dip would be the dip of the ssd node regardless
1608 1611 * of the scsi_address passed in.
1609 1612 */
1610 1613 static int
1611 1614 vhci_scsi_reset_target(struct scsi_address *ap, int level, uint8_t select_path)
1612 1615 {
1613 1616 dev_info_t *vdip, *cdip;
1614 1617 mdi_pathinfo_t *pip = NULL;
1615 1618 mdi_pathinfo_t *npip = NULL;
1616 1619 int rval = -1;
1617 1620 scsi_vhci_priv_t *svp = NULL;
1618 1621 struct scsi_address *pap = NULL;
1619 1622 scsi_hba_tran_t *hba = NULL;
1620 1623 int sps;
1621 1624 struct scsi_vhci *vhci = NULL;
1622 1625
1623 1626 if (select_path != TRUE) {
1624 1627 ASSERT(ap != NULL);
1625 1628 if (level == RESET_LUN) {
1626 1629 hba = ap->a_hba_tran;
1627 1630 ASSERT(hba != NULL);
1628 1631 return (hba->tran_reset(ap, RESET_LUN));
1629 1632 }
1630 1633 return (scsi_reset(ap, level));
1631 1634 }
1632 1635
1633 1636 cdip = ADDR2DIP(ap);
1634 1637 ASSERT(cdip != NULL);
1635 1638 vdip = ddi_get_parent(cdip);
1636 1639 ASSERT(vdip != NULL);
1637 1640 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
1638 1641 ASSERT(vhci != NULL);
1639 1642
1640 1643 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &pip);
1641 1644 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
1642 1645 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
1643 1646 "Unable to get a path, dip 0x%p", (void *)cdip));
1644 1647 return (0);
1645 1648 }
1646 1649 again:
1647 1650 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
1648 1651 if (svp == NULL) {
1649 1652 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
1650 1653 "priv is NULL, pip 0x%p", (void *)pip));
1651 1654 mdi_rele_path(pip);
1652 1655 return (0);
1653 1656 }
1654 1657
1655 1658 if (svp->svp_psd == NULL) {
1656 1659 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_scsi_reset_target: "
1657 1660 "psd is NULL, pip 0x%p, svp 0x%p",
1658 1661 (void *)pip, (void *)svp));
1659 1662 mdi_rele_path(pip);
1660 1663 return (0);
1661 1664 }
1662 1665
1663 1666 pap = &svp->svp_psd->sd_address;
1664 1667 hba = pap->a_hba_tran;
1665 1668
1666 1669 ASSERT(pap != NULL);
1667 1670 ASSERT(hba != NULL);
1668 1671
1669 1672 if (hba->tran_reset != NULL) {
1670 1673 if (hba->tran_reset(pap, level) == 0) {
1671 1674 vhci_log(CE_WARN, vdip, "!%s%d: "
1672 1675 "path %s, reset %d failed",
1673 1676 ddi_driver_name(cdip), ddi_get_instance(cdip),
1674 1677 mdi_pi_spathname(pip), level);
1675 1678
1676 1679 /*
1677 1680 * Select next path and issue the reset, repeat
1678 1681 * until all paths are exhausted
1679 1682 */
1680 1683 sps = mdi_select_path(cdip, NULL,
1681 1684 MDI_SELECT_ONLINE_PATH, pip, &npip);
1682 1685 if ((sps != MDI_SUCCESS) || (npip == NULL)) {
1683 1686 mdi_rele_path(pip);
1684 1687 return (0);
1685 1688 }
1686 1689 mdi_rele_path(pip);
1687 1690 pip = npip;
1688 1691 goto again;
1689 1692 }
1690 1693 mdi_rele_path(pip);
1691 1694 mutex_enter(&vhci->vhci_mutex);
1692 1695 scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
1693 1696 &vhci->vhci_reset_notify_listf);
1694 1697 mutex_exit(&vhci->vhci_mutex);
1695 1698 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_scsi_reset_target: "
1696 1699 "reset %d sent down pip:%p for cdip:%p\n", level,
1697 1700 (void *)pip, (void *)cdip));
1698 1701 return (1);
1699 1702 }
1700 1703 mdi_rele_path(pip);
1701 1704 return (0);
1702 1705 }
1703 1706
1704 1707
1705 1708 /* ARGSUSED */
1706 1709 static int
1707 1710 vhci_scsi_reset_bus(struct scsi_address *ap)
1708 1711 {
1709 1712 return (1);
1710 1713 }
1711 1714
1712 1715
1713 1716 /*
1714 1717 * called by vhci_getcap and vhci_setcap to get and set (respectively)
1715 1718 * SCSI capabilities
1716 1719 */
1717 1720 /* ARGSUSED */
1718 1721 static int
1719 1722 vhci_commoncap(struct scsi_address *ap, char *cap,
1720 1723 int val, int tgtonly, int doset)
1721 1724 {
1722 1725 struct scsi_vhci *vhci = ADDR2VHCI(ap);
1723 1726 struct scsi_vhci_lun *vlun = ADDR2VLUN(ap);
1724 1727 int cidx;
1725 1728 int rval = 0;
1726 1729
1727 1730 if (cap == (char *)0) {
1728 1731 VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1729 1732 "!vhci_commoncap: invalid arg"));
1730 1733 return (rval);
1731 1734 }
1732 1735
1733 1736 if (vlun == NULL) {
1734 1737 VHCI_DEBUG(3, (CE_WARN, vhci->vhci_dip,
1735 1738 "!vhci_commoncap: vlun is null"));
1736 1739 return (rval);
1737 1740 }
1738 1741
1739 1742 if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
1740 1743 return (UNDEFINED);
1741 1744 }
1742 1745
1743 1746 /*
1744 1747 * Process setcap request.
1745 1748 */
1746 1749 if (doset) {
1747 1750 /*
1748 1751 * At present, we can only set binary (0/1) values
1749 1752 */
1750 1753 switch (cidx) {
1751 1754 case SCSI_CAP_ARQ:
1752 1755 if (val == 0) {
1753 1756 rval = 0;
1754 1757 } else {
1755 1758 rval = 1;
1756 1759 }
1757 1760 break;
1758 1761
1759 1762 case SCSI_CAP_LUN_RESET:
1760 1763 if (tgtonly == 0) {
1761 1764 VHCI_DEBUG(1, (CE_WARN, vhci->vhci_dip,
1762 1765 "scsi_vhci_setcap: "
1763 1766 "Returning error since whom = 0"));
1764 1767 rval = -1;
1765 1768 break;
1766 1769 }
1767 1770 /*
1768 1771 * Set the capability accordingly.
1769 1772 */
1770 1773 mutex_enter(&vlun->svl_mutex);
1771 1774 vlun->svl_support_lun_reset = val;
1772 1775 rval = val;
1773 1776 mutex_exit(&vlun->svl_mutex);
1774 1777 break;
1775 1778
1776 1779 case SCSI_CAP_SECTOR_SIZE:
1777 1780 mutex_enter(&vlun->svl_mutex);
1778 1781 vlun->svl_sector_size = val;
1779 1782 vlun->svl_setcap_done = 1;
1780 1783 mutex_exit(&vlun->svl_mutex);
1781 1784 (void) vhci_pHCI_cap(ap, cap, val, tgtonly, NULL);
1782 1785
1783 1786 /* Always return success */
1784 1787 rval = 1;
1785 1788 break;
1786 1789
1787 1790 default:
1788 1791 VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1789 1792 "!vhci_setcap: unsupported %d", cidx));
1790 1793 rval = UNDEFINED;
1791 1794 break;
1792 1795 }
1793 1796
1794 1797 VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1795 1798 "!set cap: cap=%s, val/tgtonly/doset/rval = "
1796 1799 "0x%x/0x%x/0x%x/%d\n",
1797 1800 cap, val, tgtonly, doset, rval));
1798 1801
1799 1802 } else {
1800 1803 /*
1801 1804 * Process getcap request.
1802 1805 */
1803 1806 switch (cidx) {
1804 1807 case SCSI_CAP_DMA_MAX:
1805 1808 /*
1806 1809 * For X86 this capability is caught in scsi_ifgetcap().
1807 1810 * XXX Should this be getting the value from the pHCI?
1808 1811 */
1809 1812 rval = (int)VHCI_DMA_MAX_XFER_CAP;
1810 1813 break;
1811 1814
1812 1815 case SCSI_CAP_INITIATOR_ID:
1813 1816 rval = 0x00;
1814 1817 break;
1815 1818
1816 1819 case SCSI_CAP_ARQ:
1817 1820 case SCSI_CAP_RESET_NOTIFICATION:
1818 1821 case SCSI_CAP_TAGGED_QING:
1819 1822 rval = 1;
1820 1823 break;
1821 1824
1822 1825 case SCSI_CAP_SCSI_VERSION:
1823 1826 rval = 3;
1824 1827 break;
1825 1828
1826 1829 case SCSI_CAP_INTERCONNECT_TYPE:
1827 1830 rval = INTERCONNECT_FABRIC;
1828 1831 break;
1829 1832
1830 1833 case SCSI_CAP_LUN_RESET:
1831 1834 /*
1832 1835 * scsi_vhci will always return success for LUN reset.
1833 1836 * When a request for a LUN reset comes
1834 1837 * through the scsi_reset entry point, an attempt
1835 1838 * will be made to do the reset through all the
1836 1839 * possible paths.
1837 1840 */
1838 1841 mutex_enter(&vlun->svl_mutex);
1839 1842 rval = vlun->svl_support_lun_reset;
1840 1843 mutex_exit(&vlun->svl_mutex);
1841 1844 VHCI_DEBUG(4, (CE_WARN, vhci->vhci_dip,
1842 1845 "scsi_vhci_getcap:"
1843 1846 "Getting the Lun reset capability %d", rval));
1844 1847 break;
1845 1848
1846 1849 case SCSI_CAP_SECTOR_SIZE:
1847 1850 mutex_enter(&vlun->svl_mutex);
1848 1851 rval = vlun->svl_sector_size;
1849 1852 mutex_exit(&vlun->svl_mutex);
1850 1853 break;
1851 1854
1852 1855 case SCSI_CAP_CDB_LEN:
1853 1856 rval = VHCI_SCSI_CDB_SIZE;
1854 1857 break;
1855 1858
1856 1859 case SCSI_CAP_DMA_MAX_ARCH:
1857 1860 /*
1858 1861 * For X86 this capability is caught in scsi_ifgetcap().
1859 1862 * XXX Should this be getting the value from the pHCI?
1860 1863 */
1861 1864 rval = 0;
1862 1865 break;
1863 1866
1864 1867 default:
1865 1868 VHCI_DEBUG(6, (CE_WARN, vhci->vhci_dip,
1866 1869 "!vhci_getcap: unsupported %d", cidx));
1867 1870 rval = UNDEFINED;
1868 1871 break;
1869 1872 }
1870 1873
1871 1874 VHCI_DEBUG(6, (CE_NOTE, vhci->vhci_dip,
1872 1875 "!get cap: cap=%s, val/tgtonly/doset/rval = "
1873 1876 "0x%x/0x%x/0x%x/%d\n",
1874 1877 cap, val, tgtonly, doset, rval));
1875 1878 }
1876 1879 return (rval);
1877 1880 }
1878 1881
1879 1882
1880 1883 /*
1881 1884 * Function name : vhci_scsi_getcap()
1882 1885 *
1883 1886 */
1884 1887 static int
1885 1888 vhci_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
1886 1889 {
1887 1890 return (vhci_commoncap(ap, cap, 0, whom, 0));
1888 1891 }
1889 1892
1890 1893 static int
1891 1894 vhci_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
1892 1895 {
1893 1896 return (vhci_commoncap(ap, cap, value, whom, 1));
1894 1897 }
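
For orientation, target drivers reach vhci_commoncap() through the generic capability entry points; a minimal sketch of querying the LUN-reset capability handled above, assuming the standard "lun-reset" capability string of scsi_ifgetcap(9F) and an illustrative devp pointer:

    /* does this vlun advertise LUN reset (svl_support_lun_reset)? */
    if (scsi_ifgetcap(&devp->sd_address, "lun-reset", 1) == 1) {
            /* scsi_reset(9F) with RESET_LUN is worth attempting */
    }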
1895 1898
1896 1899 /*
1897 1900 * Function name : vhci_scsi_abort()
1898 1901 */
1899 1902 /* ARGSUSED */
1900 1903 static int
1901 1904 vhci_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1902 1905 {
1903 1906 return (0);
1904 1907 }
1905 1908
1906 1909 /*
1907 1910 * Function name : vhci_scsi_init_pkt
1908 1911 *
1909 1912 * Return Values : pointer to scsi_pkt, or NULL
1910 1913 */
1911 1914 /* ARGSUSED */
1912 1915 static struct scsi_pkt *
1913 1916 vhci_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
1914 1917 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1915 1918 int flags, int (*callback)(caddr_t), caddr_t arg)
1916 1919 {
1917 1920 struct scsi_vhci *vhci = ADDR2VHCI(ap);
1918 1921 struct vhci_pkt *vpkt;
1919 1922 int rval;
1920 1923 int newpkt = 0;
1921 1924 struct scsi_pkt *pktp;
1922 1925
1923 1926
1924 1927 if (pkt == NULL) {
1925 1928 if (cmdlen > VHCI_SCSI_CDB_SIZE) {
1926 1929 if ((cmdlen != VHCI_SCSI_OSD_CDB_SIZE) ||
1927 1930 ((flags & VHCI_SCSI_OSD_PKT_FLAGS) !=
1928 1931 VHCI_SCSI_OSD_PKT_FLAGS)) {
1929 1932 VHCI_DEBUG(1, (CE_NOTE, NULL,
1930 1933 "!init pkt: cdb size not supported\n"));
1931 1934 return (NULL);
1932 1935 }
1933 1936 }
1934 1937
1935 1938 pktp = scsi_hba_pkt_alloc(vhci->vhci_dip,
1936 1939 ap, cmdlen, statuslen, tgtlen, sizeof (*vpkt), callback,
1937 1940 arg);
1938 1941
1939 1942 if (pktp == NULL) {
1940 1943 return (NULL);
1941 1944 }
1942 1945
1943 1946 /* Get the vhci's private structure */
1944 1947 vpkt = (struct vhci_pkt *)(pktp->pkt_ha_private);
1945 1948 ASSERT(vpkt);
1946 1949
1947 1950 /* Save the target driver's packet */
1948 1951 vpkt->vpkt_tgt_pkt = pktp;
1949 1952
1950 1953 /*
1951 1954 * Save pkt_tgt_init_pkt fields if deferred binding
1952 1955 * is needed or for other purposes.
1953 1956 */
1954 1957 vpkt->vpkt_tgt_init_pkt_flags = flags;
1955 1958 vpkt->vpkt_flags = (callback == NULL_FUNC) ? CFLAG_NOWAIT : 0;
1956 1959 vpkt->vpkt_state = VHCI_PKT_IDLE;
1957 1960 vpkt->vpkt_tgt_init_cdblen = cmdlen;
1958 1961 vpkt->vpkt_tgt_init_scblen = statuslen;
1959 1962 newpkt = 1;
1960 1963 } else { /* pkt not NULL */
1961 1964 vpkt = pkt->pkt_ha_private;
1962 1965 }
1963 1966
1964 1967 VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_scsi_init_pkt "
1965 1968 "vpkt %p flags %x\n", (void *)vpkt, flags));
1966 1969
1967 1970 /* Clear any stale error flags */
1968 1971 if (bp) {
1969 1972 bioerror(bp, 0);
1970 1973 }
1971 1974
1972 1975 vpkt->vpkt_tgt_init_bp = bp;
1973 1976
1974 1977 if (flags & PKT_DMA_PARTIAL) {
1975 1978
1976 1979 /*
1977 1980 * Immediate binding is needed.
1978 1981 * The target driver may not set this flag in the next
1979 1982 * invocation. vhci has to remember that this flag was set
1980 1983 * during the first invocation of vhci_scsi_init_pkt.
1981 1984 */
1982 1985 vpkt->vpkt_flags |= CFLAG_DMA_PARTIAL;
1983 1986 }
1984 1987
1985 1988 if (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) {
1986 1989
1987 1990 /*
1988 1991 * Re-initialize some of the target driver packet state
1989 1992 * information.
1990 1993 */
1991 1994 vpkt->vpkt_tgt_pkt->pkt_state = 0;
1992 1995 vpkt->vpkt_tgt_pkt->pkt_statistics = 0;
1993 1996 vpkt->vpkt_tgt_pkt->pkt_reason = 0;
1994 1997
1995 1998 /*
1996 1999 * Bind a vpkt->vpkt_path for this I/O at init time. If an
1997 2000 * I/O error happens later, the target driver will clear this
1998 2001 * vpkt->vpkt_path binding before re-initializing the I/O.
1999 2002 */
2000 2003 VHCI_DEBUG(8, (CE_NOTE, NULL,
2001 2004 "vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n",
2002 2005 (void *)vpkt, newpkt));
2003 2006 if (pkt && vpkt->vpkt_hba_pkt) {
2004 2007 VHCI_DEBUG(4, (CE_NOTE, NULL,
2005 2008 "v_s_i_p calling update_pHCI_pkt resid %ld\n",
2006 2009 pkt->pkt_resid));
2007 2010 vhci_update_pHCI_pkt(vpkt, pkt);
2008 2011 }
2009 2012 if (callback == SLEEP_FUNC) {
2010 2013 rval = vhci_bind_transport(
2011 2014 ap, vpkt, flags, callback);
2012 2015 } else {
2013 2016 rval = vhci_bind_transport(
2014 2017 ap, vpkt, flags, NULL_FUNC);
2015 2018 }
2016 2019 VHCI_DEBUG(8, (CE_NOTE, NULL,
2017 2020 "vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n",
2018 2021 (void *)vpkt, rval));
2019 2022 if (bp) {
2020 2023 if (rval == TRAN_FATAL_ERROR) {
2021 2024 /*
2022 2025 * No paths available. Could not bind
2023 2026 * any pHCI. Setting EFAULT as a way
2024 2027 * to indicate no DMA is mapped.
2025 2028 */
2026 2029 bioerror(bp, EFAULT);
2027 2030 } else {
2028 2031 /*
2029 2032 * Do not indicate any pHCI errors to
2030 2033 * target driver otherwise.
2031 2034 */
2032 2035 bioerror(bp, 0);
2033 2036 }
2034 2037 }
2035 2038 if (rval != TRAN_ACCEPT) {
2036 2039 VHCI_DEBUG(8, (CE_NOTE, NULL,
2037 2040 "vhci_scsi_init_pkt: "
2038 2041 "v_b_t failed 0x%p newpkt %x\n",
2039 2042 (void *)vpkt, newpkt));
2040 2043 if (newpkt) {
2041 2044 scsi_hba_pkt_free(ap,
2042 2045 vpkt->vpkt_tgt_pkt);
2043 2046 }
2044 2047 return (NULL);
2045 2048 }
2046 2049 ASSERT(vpkt->vpkt_hba_pkt != NULL);
2047 2050 ASSERT(vpkt->vpkt_path != NULL);
2048 2051
2049 2052 /* Update the resid for the target driver */
2050 2053 vpkt->vpkt_tgt_pkt->pkt_resid =
2051 2054 vpkt->vpkt_hba_pkt->pkt_resid;
2052 2055 }
2053 2056
2054 2057 return (vpkt->vpkt_tgt_pkt);
2055 2058 }
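
A sketch of the PKT_DMA_PARTIAL contract that this routine honors (the ap/bp/pkt names are illustrative; the re-invocation pattern follows scsi_init_pkt(9F)): the first call binds the initial DMA window, and when a transfer completes with a nonzero pkt_resid the target driver calls scsi_init_pkt() again with the same pkt and bp to slide the window; this is why vhci must remember CFLAG_DMA_PARTIAL across invocations:

    /* first call: allocate the packet and bind the initial window */
    pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP1,
        sizeof (struct scsi_arq_status), 0, PKT_DMA_PARTIAL,
        SLEEP_FUNC, NULL);
    /* after completion with pkt->pkt_resid != 0: move the window */
    pkt = scsi_init_pkt(ap, pkt, bp, 0, 0, 0, 0, SLEEP_FUNC, NULL);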
2056 2059
2057 2060 /*
2058 2061 * Function name : vhci_scsi_destroy_pkt
2059 2062 *
2060 2063 * Return Values : none
2061 2064 */
2062 2065 static void
2063 2066 vhci_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2064 2067 {
2065 2068 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2066 2069
2067 2070 VHCI_DEBUG(8, (CE_NOTE, NULL,
2068 2071 "vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt));
2069 2072
2070 2073 vpkt->vpkt_tgt_init_pkt_flags = 0;
2071 2074 if (vpkt->vpkt_hba_pkt) {
2072 2075 scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
2073 2076 vpkt->vpkt_hba_pkt = NULL;
2074 2077 }
2075 2078 if (vpkt->vpkt_path) {
2076 2079 mdi_rele_path(vpkt->vpkt_path);
2077 2080 vpkt->vpkt_path = NULL;
2078 2081 }
2079 2082
2080 2083 ASSERT(vpkt->vpkt_state != VHCI_PKT_ISSUED);
2081 2084 scsi_hba_pkt_free(ap, vpkt->vpkt_tgt_pkt);
2082 2085 }
2083 2086
2084 2087 /*
2085 2088 * Function name : vhci_scsi_dmafree()
2086 2089 *
2087 2090 * Return Values : none
2088 2091 */
2089 2092 /*ARGSUSED*/
2090 2093 static void
2091 2094 vhci_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2092 2095 {
2093 2096 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2094 2097
2095 2098 VHCI_DEBUG(6, (CE_NOTE, NULL,
2096 2099 "vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt));
2097 2100
2098 2101 ASSERT(vpkt != NULL);
2099 2102 if (vpkt->vpkt_hba_pkt) {
2100 2103 scsi_destroy_pkt(vpkt->vpkt_hba_pkt);
2101 2104 vpkt->vpkt_hba_pkt = NULL;
2102 2105 }
2103 2106 if (vpkt->vpkt_path) {
2104 2107 mdi_rele_path(vpkt->vpkt_path);
2105 2108 vpkt->vpkt_path = NULL;
2106 2109 }
2107 2110 }
2108 2111
2109 2112 /*
2110 2113 * Function name : vhci_scsi_sync_pkt()
2111 2114 *
2112 2115 * Return Values : none
2113 2116 */
2114 2117 /*ARGSUSED*/
2115 2118 static void
2116 2119 vhci_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2117 2120 {
2118 2121 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_ha_private;
2119 2122
2120 2123 ASSERT(vpkt != NULL);
2121 2124 if (vpkt->vpkt_hba_pkt) {
2122 2125 scsi_sync_pkt(vpkt->vpkt_hba_pkt);
2123 2126 }
2124 2127 }
2125 2128
2126 2129 /*
2127 2130 * routine for reset notification setup, to register or cancel.
2128 2131 */
2129 2132 static int
2130 2133 vhci_scsi_reset_notify(struct scsi_address *ap, int flag,
2131 2134 void (*callback)(caddr_t), caddr_t arg)
2132 2135 {
2133 2136 struct scsi_vhci *vhci = ADDR2VHCI(ap);
2134 2137 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
2135 2138 &vhci->vhci_mutex, &vhci->vhci_reset_notify_listf));
2136 2139 }
2137 2140
2138 2141 static int
2139 2142 vhci_scsi_get_name_bus_addr(struct scsi_device *sd,
2140 2143 char *name, int len, int bus_addr)
2141 2144 {
2142 2145 dev_info_t *cdip;
2143 2146 char *guid;
2144 2147 scsi_vhci_lun_t *vlun;
2145 2148
2146 2149 ASSERT(sd != NULL);
2147 2150 ASSERT(name != NULL);
2148 2151
2149 2152 *name = 0;
2150 2153 cdip = sd->sd_dev;
2151 2154
2152 2155 ASSERT(cdip != NULL);
2153 2156
2154 2157 if (mdi_component_is_client(cdip, NULL) != MDI_SUCCESS)
2155 2158 return (1);
2156 2159
2157 2160 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
2158 2161 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS)
2159 2162 return (1);
2160 2163
2161 2164 /*
2162 2165 * Message is "sd# at scsi_vhci0: unit-address <guid>: <bus_addr>".
2163 2166 * <guid> bus_addr argument == 0
2164 2167 * <bus_addr> bus_addr argument != 0
2165 2168 * Since the <guid> is already provided with the unit-address, we just
2166 2169 * provide the failover module name in <bus_addr> to keep the output shorter.
2167 2170 */
2168 2171 vlun = ADDR2VLUN(&sd->sd_address);
2169 2172 if (bus_addr == 0) {
2170 2173 /* report the guid: */
2171 2174 (void) snprintf(name, len, "g%s", guid);
2172 2175 } else if (vlun && vlun->svl_fops_name) {
2173 2176 /* report the name of the failover module */
2174 2177 (void) snprintf(name, len, "%s", vlun->svl_fops_name);
2175 2178 }
2176 2179
2177 2180 ddi_prop_free(guid);
2178 2181 return (1);
2179 2182 }
2180 2183
2181 2184 static int
2182 2185 vhci_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
2183 2186 {
2184 2187 return (vhci_scsi_get_name_bus_addr(sd, name, len, 1));
2185 2188 }
2186 2189
2187 2190 static int
2188 2191 vhci_scsi_get_name(struct scsi_device *sd, char *name, int len)
2189 2192 {
2190 2193 return (vhci_scsi_get_name_bus_addr(sd, name, len, 0));
2191 2194 }
2192 2195
2193 2196 /*
2194 2197 * Return a pointer to the guid part of the devnm.
2195 2198 * devnm format is "nodename@busaddr", busaddr format is "gGUID".
2196 2199 */
2197 2200 static char *
2198 2201 vhci_devnm_to_guid(char *devnm)
2199 2202 {
2200 2203 char *cp = devnm;
2201 2204
2202 2205 if (devnm == NULL)
2203 2206 return (NULL);
2204 2207
2205 2208 while (*cp != '\0' && *cp != '@')
2206 2209 cp++;
2207 2210 if (*cp == '@' && *(cp + 1) == 'g')
2208 2211 return (cp + 2);
2209 2212 return (NULL);
2210 2213 }
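
A quick illustration of the parse above (the GUID digits are fabricated):

    char devnm[] = "ssd@g600144f0c826c00e";
    char *guid = vhci_devnm_to_guid(devnm);  /* -> "600144f0c826c00e" */
    /* a devnm with no "@g" in it yields NULL */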
2211 2214
2212 2215 static int
2213 2216 vhci_bind_transport(struct scsi_address *ap, struct vhci_pkt *vpkt, int flags,
2214 2217 int (*func)(caddr_t))
2215 2218 {
2216 2219 struct scsi_vhci *vhci = ADDR2VHCI(ap);
2217 2220 dev_info_t *cdip = ADDR2DIP(ap);
2218 2221 mdi_pathinfo_t *pip = NULL;
2219 2222 mdi_pathinfo_t *npip = NULL;
2220 2223 scsi_vhci_priv_t *svp = NULL;
2221 2224 struct scsi_device *psd = NULL;
2222 2225 struct scsi_address *address = NULL;
2223 2226 struct scsi_pkt *pkt = NULL;
2224 2227 int rval = -1;
2225 2228 int pgr_sema_held = 0;
2226 2229 int held;
2227 2230 int mps_flag = MDI_SELECT_ONLINE_PATH;
2228 2231 struct scsi_vhci_lun *vlun;
2229 - time_t tnow;
2230 2232 int path_instance = 0;
2231 2233
2232 2234 vlun = ADDR2VLUN(ap);
2233 2235 ASSERT(vlun != 0);
2234 2236
2235 2237 if ((vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PROUT) &&
2236 2238 (((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2237 2239 VHCI_PROUT_REGISTER) ||
2238 2240 ((vpkt->vpkt_tgt_pkt->pkt_cdbp[1] & 0x1f) ==
2239 2241 VHCI_PROUT_R_AND_IGNORE))) {
2240 2242 if (!sema_tryp(&vlun->svl_pgr_sema))
2241 2243 return (TRAN_BUSY);
2242 2244 pgr_sema_held = 1;
2243 2245 if (vlun->svl_first_path != NULL) {
2244 2246 rval = mdi_select_path(cdip, NULL,
2245 2247 MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH,
2246 2248 NULL, &pip);
2247 2249 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2248 2250 VHCI_DEBUG(4, (CE_NOTE, NULL,
2249 2251 "vhci_bind_transport: path select fail\n"));
2250 2252 } else {
2251 2253 npip = pip;
2252 2254 do {
2253 2255 if (npip == vlun->svl_first_path) {
2254 2256 VHCI_DEBUG(4, (CE_NOTE, NULL,
2255 2257 "vhci_bind_transport: "
2256 2258 "valid first path 0x%p\n",
2257 2259 (void *)
2258 2260 vlun->svl_first_path));
2259 2261 pip = vlun->svl_first_path;
2260 2262 goto bind_path;
2261 2263 }
2262 2264 pip = npip;
2263 2265 rval = mdi_select_path(cdip, NULL,
2264 2266 MDI_SELECT_ONLINE_PATH |
2265 2267 MDI_SELECT_STANDBY_PATH,
2266 2268 pip, &npip);
2267 2269 mdi_rele_path(pip);
2268 2270 } while ((rval == MDI_SUCCESS) &&
2269 2271 (npip != NULL));
2270 2272 }
2271 2273 }
2272 2274
2273 2275 if (vlun->svl_first_path) {
2274 2276 VHCI_DEBUG(4, (CE_NOTE, NULL,
2275 2277 "vhci_bind_transport: invalid first path 0x%p\n",
2276 2278 (void *)vlun->svl_first_path));
2277 2279 vlun->svl_first_path = NULL;
2278 2280 }
2279 2281 } else if (vpkt->vpkt_tgt_pkt->pkt_cdbp[0] == SCMD_PRIN) {
2280 2282 if ((vpkt->vpkt_state & VHCI_PKT_THRU_TASKQ) == 0) {
2281 2283 if (!sema_tryp(&vlun->svl_pgr_sema))
2282 2284 return (TRAN_BUSY);
2283 2285 }
2284 2286 pgr_sema_held = 1;
2285 2287 }
2286 2288
2287 2289 /*
2288 2290 * If the path is already bound for the PKT_PARTIAL_DMA case,
2289 2291 * try to use the same path.
2290 2292 */
2291 2293 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) && vpkt->vpkt_path) {
2292 2294 VHCI_DEBUG(4, (CE_NOTE, NULL,
2293 2295 "vhci_bind_transport: PKT_PARTIAL_DMA "
2294 2296 "vpkt 0x%p, path 0x%p\n",
2295 2297 (void *)vpkt, (void *)vpkt->vpkt_path));
2296 2298 pip = vpkt->vpkt_path;
2297 2299 goto bind_path;
2298 2300 }
2299 2301
2300 2302 /*
2301 2303 * Get path_instance. Non-zero with FLAG_PKT_PATH_INSTANCE set
2302 2304 * indicates that mdi_select_path should be called to select a
2303 2305 * specific instance.
2304 2306 *
2305 2307 * NB: Condition pkt_path_instance reference on proper allocation.
2306 2308 */
2307 2309 if ((vpkt->vpkt_tgt_pkt->pkt_flags & FLAG_PKT_PATH_INSTANCE) &&
2308 2310 scsi_pkt_allocated_correctly(vpkt->vpkt_tgt_pkt)) {
2309 2311 path_instance = vpkt->vpkt_tgt_pkt->pkt_path_instance;
2310 2312 }
2311 2313
2312 2314 /*
2313 2315 * If reservation is active bind the transport directly to the pip
2314 2316 * with the reservation.
2315 2317 */
2316 2318 if (vpkt->vpkt_hba_pkt == NULL) {
2317 2319 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
2318 2320 if (MDI_PI_IS_ONLINE(vlun->svl_resrv_pip)) {
2319 2321 pip = vlun->svl_resrv_pip;
2320 2322 mdi_hold_path(pip);
2321 2323 vlun->svl_waiting_for_activepath = 0;
2322 2324 rval = MDI_SUCCESS;
2323 2325 goto bind_path;
2324 2326 } else {
2325 2327 if (pgr_sema_held) {
2326 2328 sema_v(&vlun->svl_pgr_sema);
2327 2329 }
2328 2330 return (TRAN_BUSY);
2329 2331 }
2330 2332 }
2331 2333 try_again:
2332 2334 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2333 2335 path_instance ? MDI_SELECT_PATH_INSTANCE : 0,
2334 2336 (void *)(intptr_t)path_instance, &pip);
2335 2337 if (rval == MDI_BUSY) {
2336 2338 if (pgr_sema_held) {
2337 2339 sema_v(&vlun->svl_pgr_sema);
2338 2340 }
2339 2341 return (TRAN_BUSY);
2340 2342 } else if (rval == MDI_DEVI_ONLINING) {
2341 2343 /*
2342 2344 * if we are here then we are in the midst of
2343 2345 * an attach/probe of the client device.
2344 2346 * We attempt to bind to an ONLINE path if available,
2345 2347 * else it is OK to bind to a STANDBY path (instead
2346 2348 * of triggering a failover) because I/O associated
2347 2349 * with attach/probe (e.g. INQUIRY, block 0 read)
2348 2350 * is completed by targets even on passive paths.
2349 2351 * If no ONLINE path is available, it is important
2350 2352 * to set svl_waiting_for_activepath for two
2351 2353 * reasons: (1) avoid sense analysis in the
2352 2354 * "external failure detection" codepath in
2353 2355 * vhci_intr(). Failure to do so will result in an
2354 2356 * infinite loop (unless an ONLINE path becomes
2355 2357 * available at some point); (2) avoid
2356 2358 * unnecessary failover (see "---Waiting For Active
2357 2359 * Path---" comment below).
2358 2360 */
2359 2361 VHCI_DEBUG(1, (CE_NOTE, NULL, "!%p in onlining "
2360 2362 "state\n", (void *)cdip));
2361 2363 pip = NULL;
2362 2364 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2363 2365 mps_flag, NULL, &pip);
2364 2366 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2365 2367 if (vlun->svl_waiting_for_activepath == 0) {
2366 2368 vlun->svl_waiting_for_activepath = 1;
2367 - vlun->svl_wfa_time = ddi_get_time();
2369 + vlun->svl_wfa_time = gethrtime();
2368 2370 }
2369 2371 mps_flag |= MDI_SELECT_STANDBY_PATH;
2370 2372 rval = mdi_select_path(cdip,
2371 2373 vpkt->vpkt_tgt_init_bp,
2372 2374 mps_flag, NULL, &pip);
2373 2375 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
2374 2376 if (pgr_sema_held) {
2375 2377 sema_v(&vlun->svl_pgr_sema);
2376 2378 }
2377 2379 return (TRAN_FATAL_ERROR);
2378 2380 }
2379 2381 goto bind_path;
2380 2382 }
2381 2383 } else if ((rval == MDI_FAILURE) ||
2382 2384 ((rval == MDI_NOPATH) && (path_instance))) {
2383 2385 if (pgr_sema_held) {
2384 2386 sema_v(&vlun->svl_pgr_sema);
2385 2387 }
2386 2388 return (TRAN_FATAL_ERROR);
2387 2389 }
2388 2390
2389 2391 if ((pip == NULL) || (rval == MDI_NOPATH)) {
2390 2392 while (vlun->svl_waiting_for_activepath) {
2391 2393 /*
2392 2394 * ---Waiting For Active Path---
2393 2395 * This device was discovered across a
2394 2396 * passive path; let's wait for a little
2395 2397 * bit, hopefully an active path will
2396 2398 * show up, obviating the need for a
2397 2399 * failover.
2398 2400 */
2399 - tnow = ddi_get_time();
2400 - if (tnow - vlun->svl_wfa_time >= 60) {
2401 + if ((gethrtime() - vlun->svl_wfa_time) >=
2402 + (60 * NANOSEC)) {
2401 2403 vlun->svl_waiting_for_activepath = 0;
2402 2404 } else {
2403 2405 drv_usecwait(1000);
2404 2406 if (vlun->svl_waiting_for_activepath
2405 2407 == 0) {
2406 2408 /*
2407 2409 * an active path has come
2408 2410 * online!
2409 2411 */
2410 2412 goto try_again;
2411 2413 }
2412 2414 }
2413 2415 }
2414 2416 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
2415 2417 if (!held) {
2416 2418 VHCI_DEBUG(4, (CE_NOTE, NULL,
2417 2419 "!Lun not held\n"));
2418 2420 if (pgr_sema_held) {
2419 2421 sema_v(&vlun->svl_pgr_sema);
2420 2422 }
2421 2423 return (TRAN_BUSY);
2422 2424 }
2423 2425 /*
2424 2426 * now that the LUN is stable, one last check
2425 2427 * to make sure no other changes sneaked in
2426 2428 * (like a path coming online or a
2427 2429 * failover initiated by another thread)
2428 2430 */
2429 2431 pip = NULL;
2430 2432 rval = mdi_select_path(cdip, vpkt->vpkt_tgt_init_bp,
2431 2433 0, NULL, &pip);
2432 2434 if (pip != NULL) {
2433 2435 VHCI_RELEASE_LUN(vlun);
2434 2436 vlun->svl_waiting_for_activepath = 0;
2435 2437 goto bind_path;
2436 2438 }
2437 2439
2438 2440 /*
2439 2441 * Check if there is an ONLINE path OR a STANDBY path
2440 2442 * available. If none is available, do not attempt
2441 2443 * to do a failover, just return a fatal error at this
2442 2444 * point.
2443 2445 */
2444 2446 npip = NULL;
2445 2447 rval = mdi_select_path(cdip, NULL,
2446 2448 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
2447 2449 NULL, &npip);
2448 2450 if ((npip == NULL) || (rval != MDI_SUCCESS)) {
2449 2451 /*
2450 2452 * No paths available; just return a FATAL error.
2451 2453 */
2452 2454 VHCI_RELEASE_LUN(vlun);
2453 2455 if (pgr_sema_held) {
2454 2456 sema_v(&vlun->svl_pgr_sema);
2455 2457 }
2456 2458 return (TRAN_FATAL_ERROR);
2457 2459 }
2458 2460 mdi_rele_path(npip);
2459 2461 if (!(vpkt->vpkt_state & VHCI_PKT_IN_FAILOVER)) {
2460 2462 VHCI_DEBUG(1, (CE_NOTE, NULL, "!invoking "
2461 2463 "mdi_failover\n"));
2462 2464 rval = mdi_failover(vhci->vhci_dip, cdip,
2463 2465 MDI_FAILOVER_ASYNC);
2464 2466 } else {
2465 2467 rval = vlun->svl_failover_status;
2466 2468 }
2467 2469 if (rval == MDI_FAILURE) {
2468 2470 VHCI_RELEASE_LUN(vlun);
2469 2471 if (pgr_sema_held) {
2470 2472 sema_v(&vlun->svl_pgr_sema);
2471 2473 }
2472 2474 return (TRAN_FATAL_ERROR);
2473 2475 } else if (rval == MDI_BUSY) {
2474 2476 VHCI_RELEASE_LUN(vlun);
2475 2477 if (pgr_sema_held) {
2476 2478 sema_v(&vlun->svl_pgr_sema);
2477 2479 }
2478 2480 return (TRAN_BUSY);
2479 2481 } else {
2480 2482 if (pgr_sema_held) {
2481 2483 sema_v(&vlun->svl_pgr_sema);
2482 2484 }
2483 2485 vpkt->vpkt_state |= VHCI_PKT_IN_FAILOVER;
2484 2486 return (TRAN_BUSY);
2485 2487 }
2486 2488 }
2487 2489 vlun->svl_waiting_for_activepath = 0;
2488 2490 bind_path:
2489 2491 vpkt->vpkt_path = pip;
2490 2492 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2491 2493 ASSERT(svp != NULL);
2492 2494
2493 2495 psd = svp->svp_psd;
2494 2496 ASSERT(psd != NULL);
2495 2497 address = &psd->sd_address;
2496 2498 } else {
2497 2499 pkt = vpkt->vpkt_hba_pkt;
2498 2500 address = &pkt->pkt_address;
2499 2501 }
2500 2502
2501 2503 /* Verify match of specified path_instance and selected path_instance */
2502 2504 ASSERT((path_instance == 0) ||
2503 2505 (path_instance == mdi_pi_get_path_instance(vpkt->vpkt_path)));
2504 2506
2505 2507 /*
2506 2508 * For the PKT_PARTIAL_DMA case, call the pHCI's scsi_init_pkt
2507 2509 * whenever the target driver calls vhci_scsi_init_pkt.
2508 2510 */
2509 2511 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) &&
2510 2512 vpkt->vpkt_path && vpkt->vpkt_hba_pkt) {
2511 2513 VHCI_DEBUG(4, (CE_NOTE, NULL,
2512 2514 "vhci_bind_transport: PKT_PARTIAL_DMA "
2513 2515 "vpkt 0x%p, path 0x%p hba_pkt 0x%p\n",
2514 2516 (void *)vpkt, (void *)vpkt->vpkt_path, (void *)pkt));
2515 2517 pkt = vpkt->vpkt_hba_pkt;
2516 2518 address = &pkt->pkt_address;
2517 2519 }
2518 2520
2519 2521 if (pkt == NULL || (vpkt->vpkt_flags & CFLAG_DMA_PARTIAL)) {
2520 2522 pkt = scsi_init_pkt(address, pkt,
2521 2523 vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
2522 2524 vpkt->vpkt_tgt_init_scblen, 0, flags, func, NULL);
2523 2525
2524 2526 if (pkt == NULL) {
2525 2527 VHCI_DEBUG(4, (CE_NOTE, NULL,
2526 2528 "!bind transport: 0x%p 0x%p 0x%p\n",
2527 2529 (void *)vhci, (void *)psd, (void *)vpkt));
2528 2530 if ((vpkt->vpkt_hba_pkt == NULL) && vpkt->vpkt_path) {
2529 2531 MDI_PI_ERRSTAT(vpkt->vpkt_path,
2530 2532 MDI_PI_TRANSERR);
2531 2533 mdi_rele_path(vpkt->vpkt_path);
2532 2534 vpkt->vpkt_path = NULL;
2533 2535 }
2534 2536 if (pgr_sema_held) {
2535 2537 sema_v(&vlun->svl_pgr_sema);
2536 2538 }
2537 2539 /*
2538 2540 * Consider it a fatal error if b_error is
2539 2541 * set as a result of DMA binding failure
2540 2542 * vs. a condition of being temporarily out of
2541 2543 * some resource
2542 2544 */
2543 2545 if (vpkt->vpkt_tgt_init_bp == NULL ||
2544 2546 geterror(vpkt->vpkt_tgt_init_bp))
2545 2547 return (TRAN_FATAL_ERROR);
2546 2548 else
2547 2549 return (TRAN_BUSY);
2548 2550 }
2549 2551 }
2550 2552
2551 2553 pkt->pkt_private = vpkt;
2552 2554 vpkt->vpkt_hba_pkt = pkt;
2553 2555 return (TRAN_ACCEPT);
2554 2556 }
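
The two gethrtime() hunks above carry the substance of this change: ddi_get_time(9F) reports wall-clock seconds, so a step of the system clock while a packet waits could stretch or collapse the 60-second window, whereas gethrtime(9F) is a monotonic nanosecond counter. The pattern, reduced to a sketch with an illustrative variable name:

    hrtime_t start = gethrtime();       /* monotonic, in nanoseconds */
    /* ... later, in the polling loop ... */
    if (gethrtime() - start >= 60LL * NANOSEC) {
            /* sixty real seconds elapsed, immune to clock steps */
    }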
2555 2557
2556 2558
2557 2559 /*PRINTFLIKE3*/
2558 2560 void
2559 2561 vhci_log(int level, dev_info_t *dip, const char *fmt, ...)
2560 2562 {
2561 2563 char buf[256];
2562 2564 va_list ap;
2563 2565
2564 2566 va_start(ap, fmt);
2565 2567 (void) vsprintf(buf, fmt, ap);
2566 2568 va_end(ap);
2567 2569
2568 2570 scsi_log(dip, "scsi_vhci", level, buf);
2569 2571 }
2570 2572
2571 2573 /* do a PGR out with the information we've saved away */
2572 2574 static int
2573 2575 vhci_do_prout(scsi_vhci_priv_t *svp)
2574 2576 {
2575 2577
2576 2578 struct scsi_pkt *new_pkt;
2577 2579 struct buf *bp;
2578 2580 scsi_vhci_lun_t *vlun = svp->svp_svl;
2579 2581 int rval, retry, nr_retry, ua_retry;
2580 2582 uint8_t *sns, skey;
2581 2583
2582 2584 bp = getrbuf(KM_SLEEP);
2583 2585 bp->b_flags = B_WRITE;
2584 2586 bp->b_resid = 0;
2585 2587 bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2586 2588 bp->b_bcount = vlun->svl_bcount;
2587 2589
2588 2590 VHCI_INCR_PATH_CMDCOUNT(svp);
2589 2591
2590 2592 new_pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
2591 2593 CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0,
2592 2594 SLEEP_FUNC, NULL);
2593 2595 if (new_pkt == NULL) {
2594 2596 VHCI_DECR_PATH_CMDCOUNT(svp);
2595 2597 freerbuf(bp);
2596 2598 cmn_err(CE_WARN, "!vhci_do_prout: scsi_init_pkt failed");
2597 2599 return (0);
2598 2600 }
2599 2601 mutex_enter(&vlun->svl_mutex);
2600 2602 bp->b_un.b_addr = (caddr_t)&vlun->svl_prout;
2601 2603 bp->b_bcount = vlun->svl_bcount;
2602 2604 bcopy(vlun->svl_cdb, new_pkt->pkt_cdbp,
2603 2605 sizeof (vlun->svl_cdb));
2604 2606 new_pkt->pkt_time = vlun->svl_time;
2605 2607 mutex_exit(&vlun->svl_mutex);
2606 2608 new_pkt->pkt_flags = FLAG_NOINTR;
2607 2609
2608 2610 ua_retry = nr_retry = retry = 0;
2609 2611 again:
2610 2612 rval = vhci_do_scsi_cmd(new_pkt);
2611 2613 if (rval != 1) {
2612 2614 if ((new_pkt->pkt_reason == CMD_CMPLT) &&
2613 2615 (SCBP_C(new_pkt) == STATUS_CHECK) &&
2614 2616 (new_pkt->pkt_state & STATE_ARQ_DONE)) {
2615 2617 sns = (uint8_t *)
2616 2618 &(((struct scsi_arq_status *)(uintptr_t)
2617 2619 (new_pkt->pkt_scbp))->sts_sensedata);
2618 2620 skey = scsi_sense_key(sns);
2619 2621 if ((skey == KEY_UNIT_ATTENTION) ||
2620 2622 (skey == KEY_NOT_READY)) {
2621 2623 int max_retry;
2622 2624 struct scsi_failover_ops *fops;
2623 2625 fops = vlun->svl_fops;
2624 2626 rval = fops->sfo_analyze_sense(svp->svp_psd,
2625 2627 sns, vlun->svl_fops_ctpriv);
2626 2628 if (rval == SCSI_SENSE_NOT_READY) {
2627 2629 max_retry = vhci_prout_not_ready_retry;
2628 2630 retry = nr_retry++;
2629 2631 delay(1*drv_usectohz(1000000));
2630 2632 } else {
2631 2633 /* chk for state change and update */
2632 2634 if (rval == SCSI_SENSE_STATE_CHANGED) {
2633 2635 int held;
2634 2636 VHCI_HOLD_LUN(vlun,
2635 2637 VH_NOSLEEP, held);
2636 2638 if (!held) {
2637 2639 rval = TRAN_BUSY;
2638 2640 } else {
2639 2641 /* chk for alua first */
2640 2642 vhci_update_pathstates(
2641 2643 (void *)vlun);
2642 2644 }
2643 2645 }
2644 2646 retry = ua_retry++;
2645 2647 max_retry = VHCI_MAX_PGR_RETRIES;
2646 2648 }
2647 2649 if (retry < max_retry) {
2648 2650 VHCI_DEBUG(4, (CE_WARN, NULL,
2649 2651 "!vhci_do_prout retry 0x%x "
2650 2652 "(0x%x 0x%x 0x%x)",
2651 2653 SCBP_C(new_pkt),
2652 2654 new_pkt->pkt_cdbp[0],
2653 2655 new_pkt->pkt_cdbp[1],
2654 2656 new_pkt->pkt_cdbp[2]));
2655 2657 goto again;
2656 2658 }
2657 2659 rval = 0;
2658 2660 VHCI_DEBUG(4, (CE_WARN, NULL,
2659 2661 "!vhci_do_prout 0x%x "
2660 2662 "(0x%x 0x%x 0x%x)",
2661 2663 SCBP_C(new_pkt),
2662 2664 new_pkt->pkt_cdbp[0],
2663 2665 new_pkt->pkt_cdbp[1],
2664 2666 new_pkt->pkt_cdbp[2]));
2665 2667 } else if (skey == KEY_ILLEGAL_REQUEST)
2666 2668 rval = VHCI_PGR_ILLEGALOP;
2667 2669 }
2668 2670 } else {
2669 2671 rval = 1;
2670 2672 }
2671 2673 scsi_destroy_pkt(new_pkt);
2672 2674 VHCI_DECR_PATH_CMDCOUNT(svp);
2673 2675 freerbuf(bp);
2674 2676 return (rval);
2675 2677 }
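
As a rough sizing of the retry loop above: the NOT READY leg sleeps one second per attempt (delay(drv_usectohz(1000000))), so the default vhci_prout_not_ready_retry of 180 gives the LUN roughly 180 x 1 s = 3 minutes to become ready, while UNIT ATTENTION retries are bounded separately by VHCI_MAX_PGR_RETRIES.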
2676 2678
2677 2679 static void
2678 2680 vhci_run_cmd(void *arg)
2679 2681 {
2680 2682 struct scsi_pkt *pkt = (struct scsi_pkt *)arg;
2681 2683 struct scsi_pkt *tpkt;
2682 2684 scsi_vhci_priv_t *svp;
2683 2685 mdi_pathinfo_t *pip, *npip;
2684 2686 scsi_vhci_lun_t *vlun;
2685 2687 dev_info_t *cdip;
2686 2688 scsi_vhci_priv_t *nsvp;
2687 2689 int fail = 0;
2688 2690 int rval;
2689 2691 struct vhci_pkt *vpkt;
2690 2692 uchar_t cdb_1;
2691 2693 vhci_prout_t *prout;
2692 2694
2693 2695 vpkt = (struct vhci_pkt *)pkt->pkt_private;
2694 2696 tpkt = vpkt->vpkt_tgt_pkt;
2695 2697 pip = vpkt->vpkt_path;
2696 2698 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
2697 2699 if (svp == NULL) {
2698 2700 tpkt->pkt_reason = CMD_TRAN_ERR;
2699 2701 tpkt->pkt_statistics = STAT_ABORTED;
2700 2702 goto done;
2701 2703 }
2702 2704 vlun = svp->svp_svl;
2703 2705 prout = &vlun->svl_prout;
2704 2706 if (SCBP_C(pkt) != STATUS_GOOD)
2705 2707 fail++;
2706 2708 cdip = vlun->svl_dip;
2707 2709 pip = npip = NULL;
2708 2710 rval = mdi_select_path(cdip, NULL,
2709 2711 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH, NULL, &npip);
2710 2712 if ((rval != MDI_SUCCESS) || (npip == NULL)) {
2711 2713 VHCI_DEBUG(4, (CE_NOTE, NULL,
2712 2714 "vhci_run_cmd: no path! 0x%p\n", (void *)svp));
2713 2715 tpkt->pkt_reason = CMD_TRAN_ERR;
2714 2716 tpkt->pkt_statistics = STAT_ABORTED;
2715 2717 goto done;
2716 2718 }
2717 2719
2718 2720 cdb_1 = vlun->svl_cdb[1];
2719 2721 vlun->svl_cdb[1] &= 0xe0;
2720 2722 vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
2721 2723
2722 2724 do {
2723 2725 nsvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
2724 2726 if (nsvp == NULL) {
2725 2727 VHCI_DEBUG(4, (CE_NOTE, NULL,
2726 2728 "vhci_run_cmd: no "
2727 2729 "client priv! 0x%p offlined?\n",
2728 2730 (void *)npip));
2729 2731 goto next_path;
2730 2732 }
2731 2733 if (vlun->svl_first_path == npip) {
2732 2734 goto next_path;
2733 2735 } else {
2734 2736 if (vhci_do_prout(nsvp) != 1)
2735 2737 fail++;
2736 2738 }
2737 2739 next_path:
2738 2740 pip = npip;
2739 2741 rval = mdi_select_path(cdip, NULL,
2740 2742 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
2741 2743 pip, &npip);
2742 2744 mdi_rele_path(pip);
2743 2745 } while ((rval == MDI_SUCCESS) && (npip != NULL));
2744 2746
2745 2747 vlun->svl_cdb[1] = cdb_1;
2746 2748
2747 2749 if (fail) {
2748 2750 VHCI_DEBUG(4, (CE_WARN, NULL, "%s%d: key registration failed, "
2749 2751 "couldn't be replicated on all paths",
2750 2752 ddi_driver_name(cdip), ddi_get_instance(cdip)));
2751 2753 vhci_print_prout_keys(vlun, "vhci_run_cmd: ");
2752 2754
2753 2755 if (SCBP_C(pkt) != STATUS_GOOD) {
2754 2756 tpkt->pkt_reason = CMD_TRAN_ERR;
2755 2757 tpkt->pkt_statistics = STAT_ABORTED;
2756 2758 }
2757 2759 } else {
2758 2760 vlun->svl_pgr_active = 1;
2759 2761 vhci_print_prout_keys(vlun, "vhci_run_cmd: before bcopy:");
2760 2762
2761 2763 bcopy((const void *)prout->service_key,
2762 2764 (void *)prout->active_service_key, MHIOC_RESV_KEY_SIZE);
2763 2765 bcopy((const void *)prout->res_key,
2764 2766 (void *)prout->active_res_key, MHIOC_RESV_KEY_SIZE);
2765 2767
2766 2768 vhci_print_prout_keys(vlun, "vhci_run_cmd: after bcopy:");
2767 2769 }
2768 2770 done:
2769 2771 if (SCBP_C(pkt) == STATUS_GOOD)
2770 2772 vlun->svl_first_path = NULL;
2771 2773
2772 2774 if (svp)
2773 2775 VHCI_DECR_PATH_CMDCOUNT(svp);
2774 2776
2775 2777 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
2776 2778 scsi_destroy_pkt(pkt);
2777 2779 vpkt->vpkt_hba_pkt = NULL;
2778 2780 if (vpkt->vpkt_path) {
2779 2781 mdi_rele_path(vpkt->vpkt_path);
2780 2782 vpkt->vpkt_path = NULL;
2781 2783 }
2782 2784 }
2783 2785
2784 2786 sema_v(&vlun->svl_pgr_sema);
2785 2787 /*
2786 2788 * The PROUT commands are not included in the automatic retry
2787 2789 * mechanism; therefore, vpkt_org_vpkt should never be set here.
2788 2790 */
2789 2791 ASSERT(vpkt->vpkt_org_vpkt == NULL);
2790 2792 scsi_hba_pkt_comp(tpkt);
2791 2793 }
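
The byte-1 dance in the middle of this routine leans on the PERSISTENT RESERVE OUT CDB layout, in which bits 4-0 of byte 1 carry the service action; masking with 0xe0 therefore preserves the upper bits while the service action is swapped to REGISTER AND IGNORE EXISTING KEY for the replication pass. Reduced to a sketch (cdb stands in for vlun->svl_cdb):

    uchar_t saved_b1 = cdb[1];
    cdb[1] = (cdb[1] & 0xe0) | VHCI_PROUT_R_AND_IGNORE;
    /* ... replicate the registration on the remaining paths ... */
    cdb[1] = saved_b1;      /* restore the original service action */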
2792 2794
2793 2795 /*
2794 2796 * Get the keys registered with this target. Since we will have
2795 2797 * registered the same key with multiple initiators, strip out
2796 2798 * any duplicate keys.
2797 2799 *
2798 2800 * The pointers which will be used to filter the registered keys from
2799 2801 * the device will be stored in filter_prin and filter_pkt. If the
2800 2802 * allocation length of the buffer was sufficient for the number of
2801 2803 * parameter data bytes available to be returned by the device then the
2802 2804 * key filtering will use the keylist returned from the original
2803 2805 * request. If the allocation length of the buffer was not sufficient,
2804 2806 * then the filtering will use the keylist returned from the request
2805 2807 * that is resent below.
2806 2808 *
2807 2809 * If the device returns an additional length field that is greater than
2808 2810 * the allocation length of the buffer, then allocate a new buffer which
2809 2811 * can accommodate the number of parameter data bytes available to be
2810 2812 * returned. Resend the scsi PRIN command, filter out the duplicate
2811 2813 * keys and return as many of the unique keys found that was originally
2812 2814 * requested and set the additional length field equal to the data bytes
2813 2815 * of unique reservation keys available to be returned.
2814 2816 *
2815 2817 * If the device returns an additional length field that is less than or
2816 2818 * equal to the allocation length of the buffer, then all the available
2817 2819 * keys registered were returned by the device. Filter out the
2818 2820 * duplicate keys and return all of the unique keys found and set the
2819 2821 * additional length field equal to the data bytes of the reservation
2820 2822 * keys to be returned.
2821 2823 */
2822 2824
2823 2825 #define VHCI_PRIN_HEADER_SZ (sizeof (prin->length) + sizeof (prin->generation))
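
A concrete sizing example for the recovery logic described above. The header fields mirror the SPC-3 READ KEYS parameter data, a 4-byte PRGENERATION plus a 4-byte ADDITIONAL LENGTH, so VHCI_PRIN_HEADER_SZ works out to 8; the key count below is made up:

    /* a device reporting five registered keys, 8 bytes each */
    uint32_t prin_length = 5 * MHIOC_RESV_KEY_SIZE;               /* 40 */
    uint32_t needed_length = prin_length + VHCI_PRIN_HEADER_SZ;   /* 48 */
    /* if b_bcount < 48, the PRIN is reissued with a 48-byte buffer */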
2824 2826
2825 2827 static int
2826 2828 vhci_do_prin(struct vhci_pkt **intr_vpkt)
2827 2829 {
2828 2830 scsi_vhci_priv_t *svp;
2829 2831 struct vhci_pkt *vpkt = *intr_vpkt;
2830 2832 vhci_prin_readkeys_t *prin;
2831 2833 scsi_vhci_lun_t *vlun;
2832 2834 struct scsi_vhci *vhci = ADDR2VHCI(&vpkt->vpkt_tgt_pkt->pkt_address);
2833 2835
2834 2836 struct buf *new_bp = NULL;
2835 2837 struct scsi_pkt *new_pkt = NULL;
2836 2838 struct vhci_pkt *new_vpkt = NULL;
2837 2839 uint32_t needed_length;
2838 2840 int rval = VHCI_CMD_CMPLT;
2839 2841 uint32_t prin_length = 0;
2840 2842 uint32_t svl_prin_length = 0;
2841 2843
2842 2844 ASSERT(vpkt->vpkt_path);
2843 2845 svp = mdi_pi_get_vhci_private(vpkt->vpkt_path);
2844 2846 ASSERT(svp);
2845 2847 vlun = svp->svp_svl;
2846 2848 ASSERT(vlun);
2847 2849
2848 2850 /*
2849 2851 * If the caller only asked for an amount of data that would not
2850 2852 * be enough to include any key data, it is likely that they will
2851 2853 * send the next command with a buffer size based on the information
2852 2854 * from this header. Doing recovery on this would be a duplication
2853 2855 * of effort.
2854 2856 */
2855 2857 if (vpkt->vpkt_tgt_init_bp->b_bcount <= VHCI_PRIN_HEADER_SZ) {
2856 2858 rval = VHCI_CMD_CMPLT;
2857 2859 goto exit;
2858 2860 }
2859 2861
2860 2862 if (vpkt->vpkt_org_vpkt == NULL) {
2861 2863 /*
2862 2864 * Can fail as sleep is not allowed.
2863 2865 */
2864 2866 prin = (vhci_prin_readkeys_t *)
2865 2867 bp_mapin_common(vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
2866 2868 } else {
2867 2869 /*
2868 2870 * The retry buf doesn't need to be mapped in.
2869 2871 */
2870 2872 prin = (vhci_prin_readkeys_t *)
2871 2873 vpkt->vpkt_tgt_init_bp->b_un.b_daddr;
2872 2874 }
2873 2875
2874 2876 if (prin == NULL) {
2875 2877 VHCI_DEBUG(5, (CE_WARN, NULL,
2876 2878 "vhci_do_prin: bp_mapin_common failed."));
2877 2879 rval = VHCI_CMD_ERROR;
2878 2880 goto fail;
2879 2881 }
2880 2882
2881 2883 prin_length = BE_32(prin->length);
2882 2884
2883 2885 /*
2884 2886 * According to SPC-3r22, sec 4.3.4.6: "If the amount of
2885 2887 * information to be transferred exceeds the maximum value
2886 2888 * that the ALLOCATION LENGTH field is capable of specifying,
2887 2889 * the device server shall...terminate the command with CHECK
2888 2890 * CONDITION status". The ALLOCATION LENGTH field of the
2889 2891 * PERSISTENT RESERVE IN command is 2 bytes. We should never
2890 2892 * get here with an ADDITIONAL LENGTH greater than 0xFFFF
2891 2893 * so if we do, then it is an error!
2892 2894 */
2893 2895
2894 2896
2895 2897 if ((prin_length + VHCI_PRIN_HEADER_SZ) > 0xFFFF) {
2896 2898 VHCI_DEBUG(5, (CE_NOTE, NULL,
2897 2899 "vhci_do_prin: Device returned invalid "
2898 2900 "length 0x%x\n", prin_length));
2899 2901 rval = VHCI_CMD_ERROR;
2900 2902 goto fail;
2901 2903 }
2902 2904 needed_length = prin_length + VHCI_PRIN_HEADER_SZ;
2903 2905
2904 2906 /*
2905 2907 * If prin->length is greater than the byte count allocated in the
2906 2908 * original buffer, then resend the request with enough buffer
2907 2909 * allocated to get all of the available registered keys.
2908 2910 */
2909 2911 if ((vpkt->vpkt_tgt_init_bp->b_bcount < needed_length) &&
2910 2912 (vpkt->vpkt_org_vpkt == NULL)) {
2911 2913
2912 2914 new_pkt = vhci_create_retry_pkt(vpkt);
2913 2915 if (new_pkt == NULL) {
2914 2916 rval = VHCI_CMD_ERROR;
2915 2917 goto fail;
2916 2918 }
2917 2919 new_vpkt = TGTPKT2VHCIPKT(new_pkt);
2918 2920
2919 2921 /*
2920 2922 * This is the buf whose data buffer
2921 2923 * will receive the prin readkeys
2922 2924 * returned from the device.
2923 2925 */
2924 2926 new_bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
2925 2927 NULL, needed_length, B_READ, NULL_FUNC, NULL);
2926 2928 if ((new_bp == NULL) || (new_bp->b_un.b_addr == NULL)) {
2927 2929 if (new_bp) {
2928 2930 scsi_free_consistent_buf(new_bp);
2929 2931 }
2930 2932 vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2931 2933 rval = VHCI_CMD_ERROR;
2932 2934 goto fail;
2933 2935 }
2934 2936 new_bp->b_bcount = needed_length;
2935 2937 new_pkt->pkt_cdbp[7] = (uchar_t)(needed_length >> 8);
2936 2938 new_pkt->pkt_cdbp[8] = (uchar_t)needed_length;
2937 2939
2938 2940 rval = VHCI_CMD_RETRY;
2939 2941
2940 2942 new_vpkt->vpkt_tgt_init_bp = new_bp;
2941 2943 }
2942 2944
2943 2945 if (rval == VHCI_CMD_RETRY) {
2944 2946
2945 2947 /*
2946 2948 * There were more keys than the original request asked for.
2947 2949 */
2948 2950 mdi_pathinfo_t *path_holder = vpkt->vpkt_path;
2949 2951
2950 2952 /*
2951 2953 * Release the old path because it does not matter which path
2952 2954 * this command is sent down. This allows the normal bind
2953 2955 * transport mechanism to be used.
2954 2956 */
2955 2957 if (vpkt->vpkt_path != NULL) {
2956 2958 mdi_rele_path(vpkt->vpkt_path);
2957 2959 vpkt->vpkt_path = NULL;
2958 2960 }
2959 2961
2960 2962 /*
2961 2963 * Dispatch the retry command
2962 2964 */
2963 2965 if (taskq_dispatch(vhci->vhci_taskq, vhci_dispatch_scsi_start,
2964 2966 (void *) new_vpkt, KM_NOSLEEP) == NULL) {
2965 2967 if (path_holder) {
2966 2968 vpkt->vpkt_path = path_holder;
2967 2969 mdi_hold_path(path_holder);
2968 2970 }
2969 2971 scsi_free_consistent_buf(new_bp);
2970 2972 vhci_scsi_destroy_pkt(&new_pkt->pkt_address, new_pkt);
2971 2973 rval = VHCI_CMD_ERROR;
2972 2974 goto fail;
2973 2975 }
2974 2976
2975 2977 /*
2976 2978 * If we return VHCI_CMD_RETRY, that means the caller
2977 2979 * is going to bail and wait for the reissued command
2978 2980 * to complete. In that case, we need to decrement
2979 2981 * the path command count right now. In any other
2980 2982 * case, it'll be decremented by the caller.
2981 2983 */
2982 2984 VHCI_DECR_PATH_CMDCOUNT(svp);
2983 2985 goto exit;
2984 2986
2985 2987 }
2986 2988
2987 2989 if (rval == VHCI_CMD_CMPLT) {
2988 2990 /*
2989 2991 * The original request got all of the keys or the recovery
2990 2992 * packet returns.
2991 2993 */
2992 2994 int new;
2993 2995 int old;
2994 2996 int num_keys = prin_length / MHIOC_RESV_KEY_SIZE;
2995 2997
2996 2998 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_do_prin: %d keys read\n",
2997 2999 num_keys));
2998 3000
2999 3001 #ifdef DEBUG
3000 3002 VHCI_DEBUG(5, (CE_NOTE, NULL, "vhci_do_prin: from storage\n"));
3001 3003 if (vhci_debug == 5)
3002 3004 vhci_print_prin_keys(prin, num_keys);
3003 3005 VHCI_DEBUG(5, (CE_NOTE, NULL,
3004 3006 "vhci_do_prin: MPxIO old keys:\n"));
3005 3007 if (vhci_debug == 5)
3006 3008 vhci_print_prin_keys(&vlun->svl_prin, num_keys);
3007 3009 #endif
3008 3010
3009 3011 /*
3010 3012 * Filter out all duplicate keys returned from the device
3011 3013 * We know that we use a different key for every host, so we
3012 3014 * can simply strip out duplicates. Otherwise we would need to
3013 3015 * do more bookkeeping to figure out which keys to strip out.
3014 3016 */
3015 3017
3016 3018 new = 0;
3017 3019
3018 3020 /*
3019 3021 * If we got at least 1 key, copy it.
3020 3022 */
3021 3023 if (num_keys > 0) {
3022 3024 vlun->svl_prin.keylist[0] = prin->keylist[0];
3023 3025 new++;
3024 3026 }
3025 3027
3026 3028 /*
3027 3029 * find next unique key.
3028 3030 */
3029 3031 for (old = 1; old < num_keys; old++) {
3030 3032 int j;
3031 3033 int match = 0;
3032 3034
3033 3035 if (new >= VHCI_NUM_RESV_KEYS)
3034 3036 break;
3035 3037 for (j = 0; j < new; j++) {
3036 3038 if (bcmp(&prin->keylist[old],
3037 3039 &vlun->svl_prin.keylist[j],
3038 3040 sizeof (mhioc_resv_key_t)) == 0) {
3039 3041 match = 1;
3040 3042 break;
3041 3043 }
3042 3044 }
3043 3045 if (!match) {
3044 3046 vlun->svl_prin.keylist[new] =
3045 3047 prin->keylist[old];
3046 3048 new++;
3047 3049 }
3048 3050 }
3049 3051
3050 3052 /* Stored Big Endian */
3051 3053 vlun->svl_prin.generation = prin->generation;
3052 3054 svl_prin_length = new * sizeof (mhioc_resv_key_t);
3053 3055 /* Stored Big Endian */
3054 3056 vlun->svl_prin.length = BE_32(svl_prin_length);
3055 3057 svl_prin_length += VHCI_PRIN_HEADER_SZ;
3056 3058
3057 3059 /*
3058 3060 * If we arrived at this point after issuing a retry, make sure
3059 3061 * that we put everything back the way it originally was so
3060 3062 * that the target driver can complete the command correctly.
3061 3063 */
3062 3064 if (vpkt->vpkt_org_vpkt != NULL) {
3063 3065 new_bp = vpkt->vpkt_tgt_init_bp;
3064 3066
3065 3067 scsi_free_consistent_buf(new_bp);
3066 3068
3067 3069 vpkt = vhci_sync_retry_pkt(vpkt);
3068 3070 *intr_vpkt = vpkt;
3069 3071
3070 3072 /*
3071 3073 * Make sure the original buffer is mapped into kernel
3072 3074 * space before we try to copy the filtered keys into
3073 3075 * it.
3074 3076 */
3075 3077 prin = (vhci_prin_readkeys_t *)bp_mapin_common(
3076 3078 vpkt->vpkt_tgt_init_bp, VM_NOSLEEP);
3077 3079 }
3078 3080
3079 3081 /*
3080 3082 * Now copy the desired number of prin keys into the original
3081 3083 * target buffer.
3082 3084 */
3083 3085 if (svl_prin_length <= vpkt->vpkt_tgt_init_bp->b_bcount) {
3084 3086 /*
3085 3087 * It is safe to return all of the available unique
3086 3088 * keys
3087 3089 */
3088 3090 bcopy(&vlun->svl_prin, prin, svl_prin_length);
3089 3091 } else {
3090 3092 /*
3091 3093 * Not all of the available keys were requested by the
3092 3094 * original command.
3093 3095 */
3094 3096 bcopy(&vlun->svl_prin, prin,
3095 3097 vpkt->vpkt_tgt_init_bp->b_bcount);
3096 3098 }
3097 3099 #ifdef DEBUG
3098 3100 VHCI_DEBUG(5, (CE_NOTE, NULL,
3099 3101 "vhci_do_prin: To Application:\n"));
3100 3102 if (vhci_debug == 5)
3101 3103 vhci_print_prin_keys(prin, new);
3102 3104 VHCI_DEBUG(5, (CE_NOTE, NULL,
3103 3105 "vhci_do_prin: MPxIO new keys:\n"));
3104 3106 if (vhci_debug == 5)
3105 3107 vhci_print_prin_keys(&vlun->svl_prin, new);
3106 3108 #endif
3107 3109 }
3108 3110 fail:
3109 3111 if (rval == VHCI_CMD_ERROR) {
3110 3112 /*
3111 3113 * If we arrived at this point after issuing a
3112 3114 * retry, make sure that we put everything back
3113 3115 * the way it originally was so that ssd can
3114 3116 * complete the command correctly.
3115 3117 */
3116 3118
3117 3119 if (vpkt->vpkt_org_vpkt != NULL) {
3118 3120 new_bp = vpkt->vpkt_tgt_init_bp;
3119 3121 if (new_bp != NULL) {
3120 3122 scsi_free_consistent_buf(new_bp);
3121 3123 }
3122 3124
3123 3125 new_vpkt = vpkt;
3124 3126 vpkt = vpkt->vpkt_org_vpkt;
3125 3127
3126 3128 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3127 3129 new_vpkt->vpkt_tgt_pkt);
3128 3130 }
3129 3131
3130 3132 /*
3131 3133 * Mark this command completion as having an error so that
3132 3134 * ssd will retry the command.
3133 3135 */
3134 3136
3135 3137 vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3136 3138 vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3137 3139
3138 3140 rval = VHCI_CMD_CMPLT;
3139 3141 }
3140 3142 exit:
3141 3143 /*
3142 3144 * Make sure that the semaphore is only released once.
3143 3145 */
3144 3146 if (rval == VHCI_CMD_CMPLT) {
3145 3147 sema_v(&vlun->svl_pgr_sema);
3146 3148 }
3147 3149
3148 3150 return (rval);
3149 3151 }
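
To make the filtering concrete: because every host registers the same key through each of its initiator ports, a device might return {K1, K2, K1, K2, K3}; the loop above keeps the first occurrence of each, hands {K1, K2, K3} back to the caller, and rewrites svl_prin.length to 3 * MHIOC_RESV_KEY_SIZE (stored big-endian).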
3150 3152
3151 3153 static void
3152 3154 vhci_intr(struct scsi_pkt *pkt)
3153 3155 {
3154 3156 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private;
3155 3157 struct scsi_pkt *tpkt;
3156 3158 scsi_vhci_priv_t *svp;
3157 3159 scsi_vhci_lun_t *vlun;
3158 3160 int rval, held;
3159 3161 struct scsi_failover_ops *fops;
3160 3162 uint8_t *sns, skey, asc, ascq;
3161 3163 mdi_pathinfo_t *lpath;
3162 3164 static char *timeout_err = "Command Timeout";
3163 3165 static char *parity_err = "Parity Error";
3164 3166 char *err_str = NULL;
3165 3167 dev_info_t *vdip, *cdip;
3166 3168 char *cpath;
3167 3169
3168 3170 ASSERT(vpkt != NULL);
3169 3171 tpkt = vpkt->vpkt_tgt_pkt;
3170 3172 ASSERT(tpkt != NULL);
3171 3173 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3172 3174 ASSERT(svp != NULL);
3173 3175 vlun = svp->svp_svl;
3174 3176 ASSERT(vlun != NULL);
3175 3177 lpath = vpkt->vpkt_path;
3176 3178
3177 3179 /*
3178 3180 * sync up the target driver's pkt with the pkt that
3179 3181 * we actually used
3180 3182 */
3181 3183 *(tpkt->pkt_scbp) = *(pkt->pkt_scbp);
3182 3184 tpkt->pkt_resid = pkt->pkt_resid;
3183 3185 tpkt->pkt_state = pkt->pkt_state;
3184 3186 tpkt->pkt_statistics = pkt->pkt_statistics;
3185 3187 tpkt->pkt_reason = pkt->pkt_reason;
3186 3188
3187 3189 /* Return path_instance information back to the target driver. */
3188 3190 if (scsi_pkt_allocated_correctly(tpkt)) {
3189 3191 if (scsi_pkt_allocated_correctly(pkt)) {
3190 3192 /*
3191 3193 * If both packets were correctly allocated,
3192 3194 * return path returned by pHCI.
3193 3195 */
3194 3196 tpkt->pkt_path_instance = pkt->pkt_path_instance;
3195 3197 } else {
3196 3198 /* Otherwise return path of pHCI we used */
3197 3199 tpkt->pkt_path_instance =
3198 3200 mdi_pi_get_path_instance(lpath);
3199 3201 }
3200 3202 }
3201 3203
3202 3204 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3203 3205 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3204 3206 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3205 3207 if ((SCBP_C(pkt) != STATUS_GOOD) ||
3206 3208 (pkt->pkt_reason != CMD_CMPLT)) {
3207 3209 sema_v(&vlun->svl_pgr_sema);
3208 3210 }
3209 3211 } else if (pkt->pkt_cdbp[0] == SCMD_PRIN) {
3210 3212 if (pkt->pkt_reason != CMD_CMPLT ||
3211 3213 (SCBP_C(pkt) != STATUS_GOOD)) {
3212 3214 sema_v(&vlun->svl_pgr_sema);
3213 3215 }
3214 3216 }
3215 3217
3216 3218 switch (pkt->pkt_reason) {
3217 3219 case CMD_CMPLT:
3218 3220 /*
3219 3221 * cmd completed successfully, check for scsi errors
3220 3222 */
3221 3223 switch (*(pkt->pkt_scbp)) {
3222 3224 case STATUS_CHECK:
3223 3225 if (pkt->pkt_state & STATE_ARQ_DONE) {
3224 3226 sns = (uint8_t *)
3225 3227 &(((struct scsi_arq_status *)(uintptr_t)
3226 3228 (pkt->pkt_scbp))->sts_sensedata);
3227 3229 skey = scsi_sense_key(sns);
3228 3230 asc = scsi_sense_asc(sns);
3229 3231 ascq = scsi_sense_ascq(sns);
3230 3232 fops = vlun->svl_fops;
3231 3233 ASSERT(fops != NULL);
3232 3234 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_intr: "
3233 3235 "Received sns key %x esc %x escq %x\n",
3234 3236 skey, asc, ascq));
3235 3237
3236 3238 if (vlun->svl_waiting_for_activepath == 1) {
3237 3239 /*
3238 3240 * if we are here it means we are
3239 3241 * in the midst of a probe/attach
3240 3242 * through a passive path; this
3241 3243 * case is exempt from sense analysis
3242 3244 * for detection of ext. failover
3243 3245 * because that would unnecessarily
3244 3246 * increase attach time.
3245 3247 */
3246 3248 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3247 3249 vpkt->vpkt_tgt_init_scblen);
3248 3250 break;
3249 3251 }
3250 3252 if (asc == VHCI_SCSI_PERR) {
3251 3253 /*
3252 3254 * parity error
3253 3255 */
3254 3256 err_str = parity_err;
3255 3257 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3256 3258 vpkt->vpkt_tgt_init_scblen);
3257 3259 break;
3258 3260 }
3259 3261 rval = fops->sfo_analyze_sense(svp->svp_psd,
3260 3262 sns, vlun->svl_fops_ctpriv);
3261 3263 if ((rval == SCSI_SENSE_NOFAILOVER) ||
3262 3264 (rval == SCSI_SENSE_UNKNOWN) ||
3263 3265 (rval == SCSI_SENSE_NOT_READY)) {
3264 3266 bcopy(pkt->pkt_scbp, tpkt->pkt_scbp,
3265 3267 vpkt->vpkt_tgt_init_scblen);
3266 3268 break;
3267 3269 } else if (rval == SCSI_SENSE_STATE_CHANGED) {
3268 3270 struct scsi_vhci *vhci;
3269 3271 vhci = ADDR2VHCI(&tpkt->pkt_address);
3270 3272 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3271 3273 if (!held) {
3272 3274 /*
3273 3275 * looks like some other thread
3274 3276 * has already detected this
3275 3277 * condition
3276 3278 */
3277 3279 tpkt->pkt_state &=
3278 3280 ~STATE_ARQ_DONE;
3279 3281 *(tpkt->pkt_scbp) =
3280 3282 STATUS_BUSY;
3281 3283 break;
3282 3284 }
3283 3285 (void) taskq_dispatch(
3284 3286 vhci->vhci_update_pathstates_taskq,
3285 3287 vhci_update_pathstates,
3286 3288 (void *)vlun, KM_SLEEP);
3287 3289 } else {
3288 3290 /*
3289 3291 * externally initiated failover
3290 3292 * has occurred or is in progress
3291 3293 */
3292 3294 VHCI_HOLD_LUN(vlun, VH_NOSLEEP, held);
3293 3295 if (!held) {
3294 3296 /*
3295 3297 * looks like some other thread
3296 3298 * has already detected this
3297 3299 * condition
3298 3300 */
3299 3301 tpkt->pkt_state &=
3300 3302 ~STATE_ARQ_DONE;
3301 3303 *(tpkt->pkt_scbp) =
3302 3304 STATUS_BUSY;
3303 3305 break;
3304 3306 } else {
3305 3307 rval = vhci_handle_ext_fo
3306 3308 (pkt, rval);
3307 3309 if (rval == BUSY_RETURN) {
3308 3310 tpkt->pkt_state &=
3309 3311 ~STATE_ARQ_DONE;
3310 3312 *(tpkt->pkt_scbp) =
3311 3313 STATUS_BUSY;
3312 3314 break;
3313 3315 }
3314 3316 bcopy(pkt->pkt_scbp,
3315 3317 tpkt->pkt_scbp,
3316 3318 vpkt->vpkt_tgt_init_scblen);
3317 3319 break;
3318 3320 }
3319 3321 }
3320 3322 }
3321 3323 break;
3322 3324
3323 3325 /*
3324 3326 * If this is a good SCSI-II RELEASE cmd completion then restore
3325 3327 * the load balancing policy and reset VLUN_RESERVE_ACTIVE_FLG.
3326 3328 * If this is a good SCSI-II RESERVE cmd completion then set
3327 3329 * VLUN_RESERVE_ACTIVE_FLG.
3328 3330 */
3329 3331 case STATUS_GOOD:
3330 3332 if ((pkt->pkt_cdbp[0] == SCMD_RELEASE) ||
3331 3333 (pkt->pkt_cdbp[0] == SCMD_RELEASE_G1)) {
3332 3334 (void) mdi_set_lb_policy(vlun->svl_dip,
3333 3335 vlun->svl_lb_policy_save);
3334 3336 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
3335 3337 VHCI_DEBUG(1, (CE_WARN, NULL,
3336 3338 "!vhci_intr: vlun 0x%p release path 0x%p",
3337 3339 (void *)vlun, (void *)vpkt->vpkt_path));
3338 3340 }
3339 3341
3340 3342 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3341 3343 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3342 3344 vlun->svl_flags |= VLUN_RESERVE_ACTIVE_FLG;
3343 3345 vlun->svl_resrv_pip = vpkt->vpkt_path;
3344 3346 VHCI_DEBUG(1, (CE_WARN, NULL,
3345 3347 "!vhci_intr: vlun 0x%p reserved path 0x%p",
3346 3348 (void *)vlun, (void *)vpkt->vpkt_path));
3347 3349 }
3348 3350 break;
3349 3351
3350 3352 case STATUS_RESERVATION_CONFLICT:
3351 3353 VHCI_DEBUG(1, (CE_WARN, NULL,
3352 3354 "!vhci_intr: vlun 0x%p "
3353 3355 "reserve conflict on path 0x%p",
3354 3356 (void *)vlun, (void *)vpkt->vpkt_path));
3355 3357 /* FALLTHROUGH */
3356 3358 default:
3357 3359 break;
3358 3360 }
3359 3361
3360 3362 /*
3361 3363 * Update I/O completion statistics for the path
3362 3364 */
3363 3365 mdi_pi_kstat_iosupdate(vpkt->vpkt_path, vpkt->vpkt_tgt_init_bp);
3364 3366
3365 3367 /*
3366 3368 * Command completed successfully, release the dma binding and
3367 3369 * destroy the transport side of the packet.
3368 3370 */
3369 3371 if ((pkt->pkt_cdbp[0] == SCMD_PROUT) &&
3370 3372 (((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_REGISTER) ||
3371 3373 ((pkt->pkt_cdbp[1] & 0x1f) == VHCI_PROUT_R_AND_IGNORE))) {
3372 3374 if (SCBP_C(pkt) == STATUS_GOOD) {
3373 3375 ASSERT(vlun->svl_taskq);
3374 3376 svp->svp_last_pkt_reason = pkt->pkt_reason;
3375 3377 (void) taskq_dispatch(vlun->svl_taskq,
3376 3378 vhci_run_cmd, pkt, KM_SLEEP);
3377 3379 return;
3378 3380 }
3379 3381 }
3380 3382 if ((SCBP_C(pkt) == STATUS_GOOD) &&
3381 3383 (pkt->pkt_cdbp[0] == SCMD_PRIN) && vpkt->vpkt_tgt_init_bp) {
3382 3384 /*
3383 3385 * If the action (value in byte 1 of the cdb) is zero,
3384 3386 * we're reading keys, and that's the only condition
3385 3387 * where we need to be concerned with filtering keys
3386 3388 * and potential retries. Otherwise, we simply signal
3387 3389 * the semaphore and move on.
3388 3390 */
3389 3391 if (pkt->pkt_cdbp[1] == 0) {
3390 3392 /*
3391 3393 * If this is the completion of an internal
3392 3394 * retry then we need to make sure that the
3393 3395 * pkt and tpkt pointers are readjusted so
3394 3396 * the calls to scsi_destroy_pkt and pkt_comp
3395 3397 			 * below work correctly.
3396 3398 */
3397 3399 if (vpkt->vpkt_org_vpkt != NULL) {
3398 3400 pkt = vpkt->vpkt_org_vpkt->vpkt_hba_pkt;
3399 3401 tpkt = vpkt->vpkt_org_vpkt->
3400 3402 vpkt_tgt_pkt;
3401 3403
3402 3404 /*
3403 3405 * If this command was issued through
3404 3406 * the taskq then we need to clear
3405 3407 * this flag for proper processing in
3406 3408 * the case of a retry from the target
3407 3409 * driver.
3408 3410 */
3409 3411 vpkt->vpkt_state &=
3410 3412 ~VHCI_PKT_THRU_TASKQ;
3411 3413 }
3412 3414
3413 3415 /*
3414 3416 * if vhci_do_prin returns VHCI_CMD_CMPLT then
3415 3417 * vpkt will contain the address of the
3416 3418 * original vpkt
3417 3419 */
3418 3420 if (vhci_do_prin(&vpkt) == VHCI_CMD_RETRY) {
3419 3421 /*
3420 3422 * The command has been resent to get
3421 3423 * all the keys from the device. Don't
3422 3424 * complete the command with ssd until
3423 3425 * the retry completes.
3424 3426 */
3425 3427 return;
3426 3428 }
3427 3429 } else {
3428 3430 sema_v(&vlun->svl_pgr_sema);
3429 3431 }
3430 3432 }
3431 3433
3432 3434 break;
3433 3435
3434 3436 case CMD_TIMEOUT:
3435 3437 if ((pkt->pkt_statistics &
3436 3438 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
3437 3439
3438 3440 VHCI_DEBUG(1, (CE_NOTE, NULL,
3439 3441 "!scsi vhci timeout invoked\n"));
3440 3442
3441 3443 (void) vhci_recovery_reset(vlun, &pkt->pkt_address,
3442 3444 FALSE, VHCI_DEPTH_ALL);
3443 3445 }
3444 3446 MDI_PI_ERRSTAT(lpath, MDI_PI_TRANSERR);
3445 3447 tpkt->pkt_statistics |= STAT_ABORTED;
3446 3448 err_str = timeout_err;
3447 3449 break;
3448 3450
3449 3451 case CMD_TRAN_ERR:
3450 3452 /*
3451 3453 * This status is returned if the transport has sent the cmd
3452 3454 * down the link to the target and then some error occurs.
3453 3455 * In case of SCSI-II RESERVE cmd, we don't know if the
3454 3456 	 * reservation has been accepted by the target or not, so we need
3455 3457 * to clear the reservation.
3456 3458 */
3457 3459 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3458 3460 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3459 3461 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_intr received"
3460 3462 " cmd_tran_err for scsi-2 reserve cmd\n"));
3461 3463 if (!vhci_recovery_reset(vlun, &pkt->pkt_address,
3462 3464 TRUE, VHCI_DEPTH_TARGET)) {
3463 3465 VHCI_DEBUG(1, (CE_WARN, NULL,
3464 3466 "!vhci_intr cmd_tran_err reset failed!"));
3465 3467 }
3466 3468 }
3467 3469 break;
3468 3470
3469 3471 case CMD_DEV_GONE:
3470 3472 /*
3471 3473 * If this is the last path then report CMD_DEV_GONE to the
3472 3474 	 * target driver, otherwise report BUSY to trigger retry.
3473 3475 */
3474 3476 if (vlun->svl_dip &&
3475 3477 (mdi_client_get_path_count(vlun->svl_dip) <= 1)) {
3476 3478 struct scsi_vhci *vhci;
3477 3479 vhci = ADDR2VHCI(&tpkt->pkt_address);
3478 3480 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3479 3481 "cmd_dev_gone on last path\n"));
3480 3482 (void) vhci_invalidate_mpapi_lu(vhci, vlun);
3481 3483 break;
3482 3484 }
3483 3485
3484 3486 /* Report CMD_CMPLT-with-BUSY to cause retry. */
3485 3487 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_intr received "
3486 3488 "cmd_dev_gone\n"));
3487 3489 tpkt->pkt_reason = CMD_CMPLT;
3488 3490 tpkt->pkt_state = STATE_GOT_BUS |
3489 3491 STATE_GOT_TARGET | STATE_SENT_CMD |
3490 3492 STATE_GOT_STATUS;
3491 3493 *(tpkt->pkt_scbp) = STATUS_BUSY;
3492 3494 break;
3493 3495
3494 3496 default:
3495 3497 break;
3496 3498 }
3497 3499
3498 3500 /*
3499 3501 	 * The SCSI-II RESERVE cmd has been serviced by the lower layers;
3500 3502 	 * clear the flag so the lun is not QUIESCED any longer.
3501 3503 * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this pkt
3502 3504 * is retried, a taskq shall again be dispatched to service it. Else
3503 3505 * it may lead to a system hang if the retry is within interrupt
3504 3506 * context.
3505 3507 */
3506 3508 if ((pkt->pkt_cdbp[0] == SCMD_RESERVE) ||
3507 3509 (pkt->pkt_cdbp[0] == SCMD_RESERVE_G1)) {
3508 3510 vlun->svl_flags &= ~VLUN_QUIESCED_FLG;
3509 3511 vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
3510 3512 }
3511 3513
3512 3514 /*
3513 3515 * vpkt_org_vpkt should always be NULL here if the retry command
3514 3516 * has been successfully processed. If vpkt_org_vpkt != NULL at
3515 3517 * this point, it is an error so restore the original vpkt and
3516 3518 * return an error to the target driver so it can retry the
3517 3519 * command as appropriate.
3518 3520 */
3519 3521 if (vpkt->vpkt_org_vpkt != NULL) {
3520 3522 struct vhci_pkt *new_vpkt = vpkt;
3521 3523 vpkt = vpkt->vpkt_org_vpkt;
3522 3524
3523 3525 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
3524 3526 new_vpkt->vpkt_tgt_pkt);
3525 3527
3526 3528 /*
3527 3529 * Mark this command completion as having an error so that
3528 3530 * ssd will retry the command.
3529 3531 */
3530 3532 vpkt->vpkt_tgt_pkt->pkt_reason = CMD_ABORTED;
3531 3533 vpkt->vpkt_tgt_pkt->pkt_statistics |= STAT_ABORTED;
3532 3534
3533 3535 pkt = vpkt->vpkt_hba_pkt;
3534 3536 tpkt = vpkt->vpkt_tgt_pkt;
3535 3537 }
3536 3538
3537 3539 if ((err_str != NULL) && (pkt->pkt_reason !=
3538 3540 svp->svp_last_pkt_reason)) {
3539 3541 cdip = vlun->svl_dip;
3540 3542 vdip = ddi_get_parent(cdip);
3541 3543 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3542 3544 vhci_log(CE_WARN, vdip, "!%s (%s%d): %s on path %s",
3543 3545 ddi_pathname(cdip, cpath), ddi_driver_name(cdip),
3544 3546 ddi_get_instance(cdip), err_str,
3545 3547 mdi_pi_spathname(vpkt->vpkt_path));
3546 3548 kmem_free(cpath, MAXPATHLEN);
3547 3549 }
3548 3550 svp->svp_last_pkt_reason = pkt->pkt_reason;
3549 3551 VHCI_DECR_PATH_CMDCOUNT(svp);
3550 3552
3551 3553 /*
3552 3554 * For PARTIAL_DMA, vhci should not free the path.
3553 3555 * Target driver will call into vhci_scsi_dmafree or
3554 3556 * destroy pkt to release this path.
3555 3557 */
3556 3558 if ((vpkt->vpkt_flags & CFLAG_DMA_PARTIAL) == 0) {
3557 3559 scsi_destroy_pkt(pkt);
3558 3560 vpkt->vpkt_hba_pkt = NULL;
3559 3561 if (vpkt->vpkt_path) {
3560 3562 mdi_rele_path(vpkt->vpkt_path);
3561 3563 vpkt->vpkt_path = NULL;
3562 3564 }
3563 3565 }
3564 3566
3565 3567 scsi_hba_pkt_comp(tpkt);
3566 3568 }
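/*
 * Note on svl_pgr_sema handling in vhci_intr() above: the semaphore
 * serializes Persistent Group Reservation commands and must be signaled
 * exactly once per PRIN/PROUT.  Failed commands signal it at the top of
 * vhci_intr(); a successful PROUT register/register-and-ignore defers
 * the sema_v() to vhci_run_cmd() via taskq, and a successful PRIN
 * read-keys defers it until vhci_do_prin() has finished any
 * key-filtering retries.  (Descriptive summary of the code above; the
 * matching sema_p() is assumed to be in the command start path, which
 * is not shown here.)
 */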
3567 3569
3568 3570 /*
3569 3571  * Two possibilities: (1) failover has completed,
3570 3572  * or (2) failover is in progress; update our path states for
3571 3573  * the former case; for the latter case,
3572 3574  * initiate a scsi_watch request to
3573 3575  * determine when failover completes - vlun is HELD
3574 3576  * until failover completes; BUSY is returned to the upper
3575 3577  * layer in both cases
3576 3578 */
3577 3579 static int
3578 3580 vhci_handle_ext_fo(struct scsi_pkt *pkt, int fostat)
3579 3581 {
3580 3582 struct vhci_pkt *vpkt = (struct vhci_pkt *)pkt->pkt_private;
3581 3583 struct scsi_pkt *tpkt;
3582 3584 scsi_vhci_priv_t *svp;
3583 3585 scsi_vhci_lun_t *vlun;
3584 3586 struct scsi_vhci *vhci;
3585 3587 scsi_vhci_swarg_t *swarg;
3586 3588 char *path;
3587 3589
3588 3590 ASSERT(vpkt != NULL);
3589 3591 tpkt = vpkt->vpkt_tgt_pkt;
3590 3592 ASSERT(tpkt != NULL);
3591 3593 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(vpkt->vpkt_path);
3592 3594 ASSERT(svp != NULL);
3593 3595 vlun = svp->svp_svl;
3594 3596 ASSERT(vlun != NULL);
3595 3597 ASSERT(VHCI_LUN_IS_HELD(vlun));
3596 3598
3597 3599 vhci = ADDR2VHCI(&tpkt->pkt_address);
3598 3600
3599 3601 if (fostat == SCSI_SENSE_INACTIVE) {
3600 3602 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover "
3601 3603 "detected for %s; updating path states...\n",
3602 3604 vlun->svl_lun_wwn));
3603 3605 /*
3604 3606 * set the vlun flag to indicate to the task that the target
3605 3607 * port group needs updating
3606 3608 */
3607 3609 vlun->svl_flags |= VLUN_UPDATE_TPG;
3608 3610 (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3609 3611 vhci_update_pathstates, (void *)vlun, KM_SLEEP);
3610 3612 } else {
3611 3613 path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3612 3614 vhci_log(CE_NOTE, ddi_get_parent(vlun->svl_dip),
3613 3615 "!%s (%s%d): Waiting for externally initiated failover "
3614 3616 "to complete", ddi_pathname(vlun->svl_dip, path),
3615 3617 ddi_driver_name(vlun->svl_dip),
3616 3618 ddi_get_instance(vlun->svl_dip));
3617 3619 kmem_free(path, MAXPATHLEN);
3618 3620 swarg = kmem_alloc(sizeof (*swarg), KM_NOSLEEP);
3619 3621 if (swarg == NULL) {
3620 3622 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_handle_ext_fo: "
3621 3623 "request packet allocation for %s failed....\n",
3622 3624 vlun->svl_lun_wwn));
3623 3625 VHCI_RELEASE_LUN(vlun);
3624 3626 return (PKT_RETURN);
3625 3627 }
3626 3628 swarg->svs_svp = svp;
3627 - swarg->svs_tos = ddi_get_time();
3629 + swarg->svs_tos = gethrtime();
3628 3630 swarg->svs_pi = vpkt->vpkt_path;
3629 3631 swarg->svs_release_lun = 0;
3630 3632 swarg->svs_done = 0;
3631 3633 /*
3632 3634 * place a hold on the path...we don't want it to
3633 3635 * vanish while scsi_watch is in progress
3634 3636 */
3635 3637 mdi_hold_path(vpkt->vpkt_path);
3636 3638 svp->svp_sw_token = scsi_watch_request_submit(svp->svp_psd,
3637 3639 VHCI_FOWATCH_INTERVAL, SENSE_LENGTH, vhci_efo_watch_cb,
3638 3640 (caddr_t)swarg);
3639 3641 }
3640 3642 return (BUSY_RETURN);
3641 3643 }
3642 3644
3643 3645 /*
3644 3646 * vhci_efo_watch_cb:
3645 3647 * Callback from scsi_watch request to check the failover status.
3646 3648 * Completion is either due to successful failover or timeout.
3647 3649 * Upon successful completion, vhci_update_path_states is called.
3648 3650 * For timeout condition, vhci_efo_done is called.
3649 3651 * Always returns 0 to scsi_watch to keep retrying till vhci_efo_done
3650 3652 * terminates this request properly in a separate thread.
3651 3653 */
3652 3654
3653 3655 static int
3654 3656 vhci_efo_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
3655 3657 {
3656 3658 struct scsi_status *statusp = resultp->statusp;
3657 3659 uint8_t *sensep = (uint8_t *)resultp->sensep;
3658 3660 struct scsi_pkt *pkt = resultp->pkt;
3659 3661 scsi_vhci_swarg_t *swarg;
3660 3662 scsi_vhci_priv_t *svp;
3661 3663 scsi_vhci_lun_t *vlun;
3662 3664 struct scsi_vhci *vhci;
3663 3665 dev_info_t *vdip;
3664 3666 int rval, updt_paths;
3665 3667
3666 3668 swarg = (scsi_vhci_swarg_t *)(uintptr_t)arg;
3667 3669 svp = swarg->svs_svp;
3668 3670 if (swarg->svs_done) {
3669 3671 /*
3670 3672 		 * Already completed failover or timed out.
3671 3673 * Waiting for vhci_efo_done to terminate this scsi_watch.
3672 3674 */
3673 3675 return (0);
3674 3676 }
3675 3677
3676 3678 ASSERT(svp != NULL);
3677 3679 vlun = svp->svp_svl;
3678 3680 ASSERT(vlun != NULL);
3679 3681 ASSERT(VHCI_LUN_IS_HELD(vlun));
3680 3682 vlun->svl_efo_update_path = 0;
3681 3683 vdip = ddi_get_parent(vlun->svl_dip);
3682 3684 vhci = ddi_get_soft_state(vhci_softstate,
3683 3685 ddi_get_instance(vdip));
3684 3686
3685 3687 updt_paths = 0;
3686 3688
3687 3689 if (pkt->pkt_reason != CMD_CMPLT) {
3688 - if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3690 + if ((gethrtime() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3689 3691 swarg->svs_release_lun = 1;
3690 3692 goto done;
3691 3693 }
3692 3694 return (0);
3693 3695 }
3694 3696 if (*((unsigned char *)statusp) == STATUS_CHECK) {
3695 3697 rval = vlun->svl_fops->sfo_analyze_sense(svp->svp_psd, sensep,
3696 3698 vlun->svl_fops_ctpriv);
3697 3699 switch (rval) {
3698 3700 /*
3699 3701 * Only update path states in case path is definitely
3700 3702 * inactive, or no failover occurred. For all other
3701 3703 * check conditions continue pinging. A unexpected
3702 3704 * check condition shouldn't cause pinging to complete
3703 3705 * prematurely.
3704 3706 */
3705 3707 case SCSI_SENSE_INACTIVE:
3706 3708 case SCSI_SENSE_NOFAILOVER:
3707 3709 updt_paths = 1;
3708 3710 break;
3709 3711 default:
3710 - if ((ddi_get_time() - swarg->svs_tos)
3712 + if ((gethrtime() - swarg->svs_tos)
3711 3713 >= VHCI_EXTFO_TIMEOUT) {
3712 3714 swarg->svs_release_lun = 1;
3713 3715 goto done;
3714 3716 }
3715 3717 return (0);
3716 3718 }
3717 3719 } else if (*((unsigned char *)statusp) ==
3718 3720 STATUS_RESERVATION_CONFLICT) {
3719 3721 updt_paths = 1;
3720 3722 } else if ((*((unsigned char *)statusp)) &
3721 3723 (STATUS_BUSY | STATUS_QFULL)) {
3722 3724 return (0);
3723 3725 }
3724 3726 if ((*((unsigned char *)statusp) == STATUS_GOOD) ||
3725 3727 (updt_paths == 1)) {
3726 3728 /*
3727 3729 * we got here because we had detected an
3728 3730 * externally initiated failover; things
3729 3731 * have settled down now, so let's
3730 3732 * start up a task to update the
3731 3733 * path states and target port group
3732 3734 */
3733 3735 vlun->svl_efo_update_path = 1;
3734 3736 swarg->svs_done = 1;
3735 3737 vlun->svl_swarg = swarg;
3736 3738 vlun->svl_flags |= VLUN_UPDATE_TPG;
3737 3739 (void) taskq_dispatch(vhci->vhci_update_pathstates_taskq,
3738 3740 vhci_update_pathstates, (void *)vlun,
3739 3741 KM_SLEEP);
3740 3742 return (0);
3741 3743 }
3742 - if ((ddi_get_time() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3744 + if ((gethrtime() - swarg->svs_tos) >= VHCI_EXTFO_TIMEOUT) {
3743 3745 swarg->svs_release_lun = 1;
3744 3746 goto done;
3745 3747 }
3746 3748 return (0);
3747 3749 done:
3748 3750 swarg->svs_done = 1;
3749 3751 (void) taskq_dispatch(vhci->vhci_taskq,
3750 3752 vhci_efo_done, (void *)swarg, KM_SLEEP);
3751 3753 return (0);
3752 3754 }
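/*
 * A minimal sketch (editorial, not part of the diff) of the timing
 * idiom this change adopts: elapsed time is measured with gethrtime(9F),
 * which returns monotonic nanoseconds, rather than ddi_get_time(9F),
 * which reports wall-clock seconds and jumps when the system time is
 * stepped, firing or starving a timeout spuriously.  The three-minute
 * bound is illustrative; the driver's real limit is VHCI_EXTFO_TIMEOUT,
 * which the companion header change is assumed to express in
 * nanoseconds to match.
 */
static boolean_t
efo_deadline_passed(hrtime_t start)
{
	/* gethrtime(9F) never goes backwards, so this is step-proof. */
	return ((gethrtime() - start) >= 3LL * 60 * NANOSEC);
}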
3753 3755
3754 3756 /*
3755 3757 * vhci_efo_done:
3756 3758  * Cleanly terminates scsi_watch and frees up resources.
3757 3759  * Called as a taskq function from vhci_efo_watch_cb for the EFO timeout
3758 3760  * condition, or by vhci_update_pathstates invoked during externally
3759 3761  * initiated failover completion.
3760 3762 */
3761 3763 static void
3762 3764 vhci_efo_done(void *arg)
3763 3765 {
3764 3766 scsi_vhci_lun_t *vlun;
3765 3767 scsi_vhci_swarg_t *swarg = (scsi_vhci_swarg_t *)arg;
3766 3768 scsi_vhci_priv_t *svp = swarg->svs_svp;
3767 3769 ASSERT(svp);
3768 3770
3769 3771 vlun = svp->svp_svl;
3770 3772 ASSERT(vlun);
3771 3773
3772 3774 /* Wait for clean termination of scsi_watch */
3773 3775 (void) scsi_watch_request_terminate(svp->svp_sw_token,
3774 3776 SCSI_WATCH_TERMINATE_ALL_WAIT);
3775 3777 svp->svp_sw_token = NULL;
3776 3778
3777 3779 	/* release path and free up resources to indicate failover completion */
3778 3780 mdi_rele_path(swarg->svs_pi);
3779 3781 if (swarg->svs_release_lun) {
3780 3782 VHCI_RELEASE_LUN(vlun);
3781 3783 }
3782 3784 kmem_free((void *)swarg, sizeof (*swarg));
3783 3785 }
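/*
 * For reference, the scsi_watch lifecycle that drives the EFO polling
 * above, reduced to a sketch ("interval" and "cb_arg" stand in for
 * VHCI_FOWATCH_INTERVAL and the swarg pointer):
 *
 *	opaque_t tok = scsi_watch_request_submit(sd, interval,
 *	    SENSE_LENGTH, vhci_efo_watch_cb, (caddr_t)cb_arg);
 *	...
 *	(void) scsi_watch_request_terminate(tok,
 *	    SCSI_WATCH_TERMINATE_ALL_WAIT);
 *
 * The callback returns 0 to keep the periodic poll alive; termination
 * happens from a separate thread (vhci_efo_done via taskq) because the
 * ALL_WAIT flavor waits for the watch thread to finish with the
 * request.
 */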
3784 3786
3785 3787 /*
3786 3788 * Update the path states
3787 3789 * vlun should be HELD when this is invoked.
3788 3790 * Calls vhci_efo_done to cleanup resources allocated for EFO.
3789 3791 */
3790 3792 void
3791 3793 vhci_update_pathstates(void *arg)
3792 3794 {
3793 3795 mdi_pathinfo_t *pip, *npip;
3794 3796 dev_info_t *dip;
3795 3797 struct scsi_failover_ops *fo;
3796 3798 struct scsi_vhci_priv *svp;
3797 3799 struct scsi_device *psd;
3798 3800 struct scsi_path_opinfo opinfo;
3799 3801 char *pclass, *tptr;
3800 3802 struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg;
3801 3803 int sps; /* mdi_select_path() status */
3802 3804 char *cpath;
3803 3805 struct scsi_vhci *vhci;
3804 3806 struct scsi_pkt *pkt;
3805 3807 struct buf *bp;
3806 3808 struct scsi_vhci_priv *svp_conflict = NULL;
3807 3809
3808 3810 ASSERT(VHCI_LUN_IS_HELD(vlun));
3809 3811 dip = vlun->svl_dip;
3810 3812 pip = npip = NULL;
3811 3813
3812 3814 vhci = ddi_get_soft_state(vhci_softstate,
3813 3815 ddi_get_instance(ddi_get_parent(dip)));
3814 3816
3815 3817 sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH |
3816 3818 MDI_SELECT_STANDBY_PATH | MDI_SELECT_NO_PREFERRED), NULL, &npip);
3817 3819 if ((npip == NULL) || (sps != MDI_SUCCESS)) {
3818 3820 goto done;
3819 3821 }
3820 3822
3821 3823 fo = vlun->svl_fops;
3822 3824 do {
3823 3825 pip = npip;
3824 3826 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
3825 3827 psd = svp->svp_psd;
3826 3828 if (fo->sfo_path_get_opinfo(psd, &opinfo,
3827 3829 vlun->svl_fops_ctpriv) != 0) {
3828 3830 sps = mdi_select_path(dip, NULL,
3829 3831 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3830 3832 MDI_SELECT_NO_PREFERRED), pip, &npip);
3831 3833 mdi_rele_path(pip);
3832 3834 continue;
3833 3835 }
3834 3836
3835 3837 if (mdi_prop_lookup_string(pip, "path-class", &pclass) !=
3836 3838 MDI_SUCCESS) {
3837 3839 VHCI_DEBUG(1, (CE_NOTE, NULL,
3838 3840 "!vhci_update_pathstates: prop lookup failed for "
3839 3841 "path 0x%p\n", (void *)pip));
3840 3842 sps = mdi_select_path(dip, NULL,
3841 3843 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
3842 3844 MDI_SELECT_NO_PREFERRED), pip, &npip);
3843 3845 mdi_rele_path(pip);
3844 3846 continue;
3845 3847 }
3846 3848
3847 3849 /*
3848 3850 * Need to update the "path-class" property
3849 3851 * value in the device tree if different
3850 3852 * from the existing value.
3851 3853 */
3852 3854 if (strcmp(pclass, opinfo.opinfo_path_attr) != 0) {
3853 3855 (void) mdi_prop_update_string(pip, "path-class",
3854 3856 opinfo.opinfo_path_attr);
3855 3857 }
3856 3858
3857 3859 /*
3858 3860 	 * Only change the state if needed, i.e. don't call
3859 3861 	 * mdi_pi_set_state to ONLINE a path if it's already
3860 3862 * ONLINE. Same for STANDBY paths.
3861 3863 */
3862 3864
3863 3865 if ((opinfo.opinfo_path_state == SCSI_PATH_ACTIVE ||
3864 3866 opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT)) {
3865 3867 if (!(MDI_PI_IS_ONLINE(pip))) {
3866 3868 VHCI_DEBUG(1, (CE_NOTE, NULL,
3867 3869 "!vhci_update_pathstates: marking path"
3868 3870 " 0x%p as ONLINE\n", (void *)pip));
3869 3871 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3870 3872 vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
3871 3873 "(%s%d): path %s "
3872 3874 "is now ONLINE because of "
3873 3875 "an externally initiated failover",
3874 3876 ddi_pathname(dip, cpath),
3875 3877 ddi_driver_name(dip),
3876 3878 ddi_get_instance(dip),
3877 3879 mdi_pi_spathname(pip));
3878 3880 kmem_free(cpath, MAXPATHLEN);
3879 3881 mdi_pi_set_state(pip,
3880 3882 MDI_PATHINFO_STATE_ONLINE);
3881 3883 mdi_pi_set_preferred(pip,
3882 3884 opinfo.opinfo_preferred);
3883 3885 tptr = kmem_alloc(strlen
3884 3886 (opinfo.opinfo_path_attr)+1, KM_SLEEP);
3885 3887 (void) strlcpy(tptr, opinfo.opinfo_path_attr,
3886 3888 (strlen(opinfo.opinfo_path_attr)+1));
3887 3889 mutex_enter(&vlun->svl_mutex);
3888 3890 if (vlun->svl_active_pclass != NULL) {
3889 3891 kmem_free(vlun->svl_active_pclass,
3890 3892 strlen(vlun->svl_active_pclass)+1);
3891 3893 }
3892 3894 vlun->svl_active_pclass = tptr;
3893 3895 if (vlun->svl_waiting_for_activepath) {
3894 3896 vlun->svl_waiting_for_activepath = 0;
3895 3897 }
3896 3898 mutex_exit(&vlun->svl_mutex);
3897 3899 } else if (MDI_PI_IS_ONLINE(pip)) {
3898 3900 if (strcmp(pclass, opinfo.opinfo_path_attr)
3899 3901 != 0) {
3900 3902 mdi_pi_set_preferred(pip,
3901 3903 opinfo.opinfo_preferred);
3902 3904 mutex_enter(&vlun->svl_mutex);
3903 3905 if (vlun->svl_active_pclass == NULL ||
3904 3906 strcmp(opinfo.opinfo_path_attr,
3905 3907 vlun->svl_active_pclass) != 0) {
3906 3908 mutex_exit(&vlun->svl_mutex);
3907 3909 tptr = kmem_alloc(strlen
3908 3910 (opinfo.opinfo_path_attr)+1,
3909 3911 KM_SLEEP);
3910 3912 (void) strlcpy(tptr,
3911 3913 opinfo.opinfo_path_attr,
3912 3914 (strlen
3913 3915 (opinfo.opinfo_path_attr)
3914 3916 +1));
3915 3917 mutex_enter(&vlun->svl_mutex);
3916 3918 } else {
3917 3919 /*
3918 3920 * No need to update
3919 3921 * svl_active_pclass
3920 3922 */
3921 3923 tptr = NULL;
3922 3924 mutex_exit(&vlun->svl_mutex);
3923 3925 }
3924 3926 if (tptr) {
3925 3927 if (vlun->svl_active_pclass
3926 3928 != NULL) {
3927 3929 kmem_free(vlun->
3928 3930 svl_active_pclass,
3929 3931 strlen(vlun->
3930 3932 svl_active_pclass)
3931 3933 +1);
3932 3934 }
3933 3935 vlun->svl_active_pclass = tptr;
3934 3936 mutex_exit(&vlun->svl_mutex);
3935 3937 }
3936 3938 }
3937 3939 }
3938 3940
3939 3941 /* Check for Reservation Conflict */
3940 3942 bp = scsi_alloc_consistent_buf(
3941 3943 &svp->svp_psd->sd_address, (struct buf *)NULL,
3942 3944 DEV_BSIZE, B_READ, NULL, NULL);
3943 3945 if (!bp) {
3944 3946 VHCI_DEBUG(1, (CE_NOTE, NULL,
3945 3947 "!vhci_update_pathstates: No resources "
3946 3948 "(buf)\n"));
3947 3949 mdi_rele_path(pip);
3948 3950 goto done;
3949 3951 }
3950 3952 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
3951 3953 CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
3952 3954 PKT_CONSISTENT, NULL, NULL);
3953 3955 if (pkt) {
3954 3956 (void) scsi_setup_cdb((union scsi_cdb *)
3955 3957 (uintptr_t)pkt->pkt_cdbp, SCMD_READ, 1, 1,
3956 3958 0);
3957 3959 pkt->pkt_time = 3*30;
3958 3960 pkt->pkt_flags = FLAG_NOINTR;
3959 3961 pkt->pkt_path_instance =
3960 3962 mdi_pi_get_path_instance(pip);
3961 3963
3962 3964 if ((scsi_transport(pkt) == TRAN_ACCEPT) &&
3963 3965 (pkt->pkt_reason == CMD_CMPLT) &&
3964 3966 (SCBP_C(pkt) ==
3965 3967 STATUS_RESERVATION_CONFLICT)) {
3966 3968 VHCI_DEBUG(1, (CE_NOTE, NULL,
3967 3969 "!vhci_update_pathstates: reserv. "
3968 3970 "conflict to be resolved on 0x%p\n",
3969 3971 (void *)pip));
3970 3972 svp_conflict = svp;
3971 3973 }
3972 3974 scsi_destroy_pkt(pkt);
3973 3975 }
3974 3976 scsi_free_consistent_buf(bp);
3975 3977 } else if ((opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) &&
3976 3978 !(MDI_PI_IS_STANDBY(pip))) {
3977 3979 VHCI_DEBUG(1, (CE_NOTE, NULL,
3978 3980 "!vhci_update_pathstates: marking path"
3979 3981 " 0x%p as STANDBY\n", (void *)pip));
3980 3982 cpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3981 3983 vhci_log(CE_NOTE, ddi_get_parent(dip), "!%s "
3982 3984 "(%s%d): path %s "
3983 3985 "is now STANDBY because of "
3984 3986 "an externally initiated failover",
3985 3987 ddi_pathname(dip, cpath),
3986 3988 ddi_driver_name(dip),
3987 3989 ddi_get_instance(dip),
3988 3990 mdi_pi_spathname(pip));
3989 3991 kmem_free(cpath, MAXPATHLEN);
3990 3992 mdi_pi_set_state(pip,
3991 3993 MDI_PATHINFO_STATE_STANDBY);
3992 3994 mdi_pi_set_preferred(pip,
3993 3995 opinfo.opinfo_preferred);
3994 3996 mutex_enter(&vlun->svl_mutex);
3995 3997 if (vlun->svl_active_pclass != NULL) {
3996 3998 if (strcmp(vlun->svl_active_pclass,
3997 3999 opinfo.opinfo_path_attr) == 0) {
3998 4000 kmem_free(vlun->
3999 4001 svl_active_pclass,
4000 4002 strlen(vlun->
4001 4003 svl_active_pclass)+1);
4002 4004 vlun->svl_active_pclass = NULL;
4003 4005 }
4004 4006 }
4005 4007 mutex_exit(&vlun->svl_mutex);
4006 4008 }
4007 4009 (void) mdi_prop_free(pclass);
4008 4010 sps = mdi_select_path(dip, NULL,
4009 4011 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH |
4010 4012 MDI_SELECT_NO_PREFERRED), pip, &npip);
4011 4013 mdi_rele_path(pip);
4012 4014
4013 4015 } while ((npip != NULL) && (sps == MDI_SUCCESS));
4014 4016
4015 4017 /*
4016 4018 	 * Check to see if this vlun has an active SCSI-II RESERVE. If so,
4017 4019 * clear the reservation by sending a reset, so the host doesn't
4018 4020 * receive a reservation conflict. The reset has to be sent via a
4019 4021 * working path. Let's use a path referred to by svp_conflict as it
4020 4022 * should be working.
4021 4023 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun. Also notify ssd
4022 4024 * of the reset, explicitly.
4023 4025 */
4024 4026 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
4025 4027 if (svp_conflict && (vlun->svl_xlf_capable == 0)) {
4026 4028 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathstates:"
4027 4029 " sending recovery reset on 0x%p, path_state: %x",
4028 4030 svp_conflict->svp_psd->sd_private,
4029 4031 mdi_pi_get_state((mdi_pathinfo_t *)
4030 4032 svp_conflict->svp_psd->sd_private)));
4031 4033
4032 4034 (void) vhci_recovery_reset(vlun,
4033 4035 &svp_conflict->svp_psd->sd_address, FALSE,
4034 4036 VHCI_DEPTH_TARGET);
4035 4037 }
4036 4038 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
4037 4039 mutex_enter(&vhci->vhci_mutex);
4038 4040 scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
4039 4041 &vhci->vhci_reset_notify_listf);
4040 4042 mutex_exit(&vhci->vhci_mutex);
4041 4043 }
4042 4044 if (vlun->svl_flags & VLUN_UPDATE_TPG) {
4043 4045 /*
4044 4046 * Update the AccessState of related MP-API TPGs
4045 4047 */
4046 4048 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
4047 4049 vlun->svl_flags &= ~VLUN_UPDATE_TPG;
4048 4050 }
4049 4051 done:
4050 4052 if (vlun->svl_efo_update_path) {
4051 4053 vlun->svl_efo_update_path = 0;
4052 4054 vhci_efo_done(vlun->svl_swarg);
4053 4055 vlun->svl_swarg = 0;
4054 4056 }
4055 4057 VHCI_RELEASE_LUN(vlun);
4056 4058 }
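/*
 * The mdi_select_path() walk used above, reduced to a minimal sketch
 * (the visit() callback is hypothetical; error handling elided).  Each
 * successful call returns a held pathinfo node: the node just visited
 * seeds the next lookup and is released once its successor is in hand.
 */
static void
vhci_walk_paths_sketch(dev_info_t *dip, void (*visit)(mdi_pathinfo_t *))
{
	mdi_pathinfo_t *pip, *npip = NULL;
	int sps;

	sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH |
	    MDI_SELECT_STANDBY_PATH | MDI_SELECT_NO_PREFERRED), NULL, &npip);
	while ((npip != NULL) && (sps == MDI_SUCCESS)) {
		pip = npip;
		visit(pip);
		sps = mdi_select_path(dip, NULL, (MDI_SELECT_ONLINE_PATH |
		    MDI_SELECT_STANDBY_PATH | MDI_SELECT_NO_PREFERRED),
		    pip, &npip);
		mdi_rele_path(pip);
	}
}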
4057 4059
4058 4060 /* ARGSUSED */
4059 4061 static int
4060 4062 vhci_pathinfo_init(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4061 4063 {
4062 4064 scsi_hba_tran_t *hba = NULL;
4063 4065 struct scsi_device *psd = NULL;
4064 4066 scsi_vhci_lun_t *vlun = NULL;
4065 4067 dev_info_t *pdip = NULL;
4066 4068 dev_info_t *tgt_dip;
4067 4069 struct scsi_vhci *vhci;
4068 4070 char *guid;
4069 4071 scsi_vhci_priv_t *svp = NULL;
4070 4072 int rval = MDI_FAILURE;
4071 4073 int vlun_alloced = 0;
4072 4074
4073 4075 ASSERT(vdip != NULL);
4074 4076 ASSERT(pip != NULL);
4075 4077
4076 4078 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4077 4079 ASSERT(vhci != NULL);
4078 4080
4079 4081 pdip = mdi_pi_get_phci(pip);
4080 4082 ASSERT(pdip != NULL);
4081 4083
4082 4084 hba = ddi_get_driver_private(pdip);
4083 4085 ASSERT(hba != NULL);
4084 4086
4085 4087 tgt_dip = mdi_pi_get_client(pip);
4086 4088 ASSERT(tgt_dip != NULL);
4087 4089
4088 4090 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip, PROPFLAGS,
4089 4091 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
4090 4092 VHCI_DEBUG(1, (CE_WARN, NULL,
4091 4093 "vhci_pathinfo_init: lun guid property failed"));
4092 4094 goto failure;
4093 4095 }
4094 4096
4095 4097 vlun = vhci_lun_lookup_alloc(tgt_dip, guid, &vlun_alloced);
4096 4098 ddi_prop_free(guid);
4097 4099
4098 4100 vlun->svl_dip = tgt_dip;
4099 4101
4100 4102 svp = kmem_zalloc(sizeof (*svp), KM_SLEEP);
4101 4103 svp->svp_svl = vlun;
4102 4104
4103 4105 /*
4104 4106 * Initialize svl_lb_policy_save only for newly allocated vlun. Writing
4105 4107 * to svl_lb_policy_save later could accidentally overwrite saved lb
4106 4108 * policy.
4107 4109 */
4108 4110 if (vlun_alloced) {
4109 4111 vlun->svl_lb_policy_save = mdi_get_lb_policy(tgt_dip);
4110 4112 }
4111 4113
4112 4114 mutex_init(&svp->svp_mutex, NULL, MUTEX_DRIVER, NULL);
4113 4115 cv_init(&svp->svp_cv, NULL, CV_DRIVER, NULL);
4114 4116
4115 4117 psd = kmem_zalloc(sizeof (*psd), KM_SLEEP);
4116 4118 mutex_init(&psd->sd_mutex, NULL, MUTEX_DRIVER, NULL);
4117 4119
4118 4120 if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) {
4119 4121 /*
4120 4122 * For a SCSI_HBA_ADDR_COMPLEX transport we store a pointer to
4121 4123 		 * scsi_device in the scsi_address structure. This allows
4122 4124 		 * an HBA driver to find its scsi_device(9S) and
4123 4125 		 * per-scsi_device(9S) HBA private data given a
4124 4126 		 * scsi_address(9S) by using scsi_address_device(9F) and
4125 4127 		 * scsi_device_hba_private_get(9F).
4126 4128 */
4127 4129 psd->sd_address.a.a_sd = psd;
4128 4130 } else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4129 4131 /*
4130 4132 * Clone transport structure if requested, so
4131 4133 		 * Clone the transport structure if requested;
4132 4134 		 * self-enumerating HBAs always need to use cloning.
4133 4135 scsi_hba_tran_t *clone =
4134 4136 kmem_alloc(sizeof (scsi_hba_tran_t), KM_SLEEP);
4135 4137 bcopy(hba, clone, sizeof (scsi_hba_tran_t));
4136 4138 hba = clone;
4137 4139 hba->tran_sd = psd;
4138 4140 } else {
4139 4141 /*
4140 4142 * SPI pHCI unit-address. If we ever need to support this
4141 4143 * we could set a.spi.a_target/a.spi.a_lun based on pathinfo
4142 4144 * node unit-address properties. For now we fail...
4143 4145 */
4144 4146 goto failure;
4145 4147 }
4146 4148
4147 4149 psd->sd_dev = tgt_dip;
4148 4150 psd->sd_address.a_hba_tran = hba;
4149 4151
4150 4152 /*
4151 4153 * Mark scsi_device as being associated with a pathinfo node. For
4152 4154 * a scsi_device structure associated with a devinfo node,
4153 4155 * scsi_ctlops_initchild sets this field to NULL.
4154 4156 */
4155 4157 psd->sd_pathinfo = pip;
4156 4158
4157 4159 /*
4158 4160 * LEGACY: sd_private: set for older mpxio-capable pHCI drivers with
4159 4161 * too much scsi_vhci/mdi/ndi knowledge. Remove this code when all
4160 4162 * mpxio-capable pHCI drivers use SCSA enumeration services (or at
4161 4163 * least have been changed to use sd_pathinfo instead).
4162 4164 */
4163 4165 psd->sd_private = (caddr_t)pip;
4164 4166
4165 4167 /* See scsi_hba.c for info on sd_tran_safe kludge */
4166 4168 psd->sd_tran_safe = hba;
4167 4169
4168 4170 svp->svp_psd = psd;
4169 4171 mdi_pi_set_vhci_private(pip, (caddr_t)svp);
4170 4172
4171 4173 /*
4172 4174 * call hba's target init entry point if it exists
4173 4175 */
4174 4176 if (hba->tran_tgt_init != NULL) {
4175 4177 psd->sd_tran_tgt_free_done = 0;
4176 4178 if ((rval = (*hba->tran_tgt_init)(pdip, tgt_dip,
4177 4179 hba, psd)) != DDI_SUCCESS) {
4178 4180 VHCI_DEBUG(1, (CE_WARN, pdip,
4179 4181 "!vhci_pathinfo_init: tran_tgt_init failed for "
4180 4182 "path=0x%p rval=%x", (void *)pip, rval));
4181 4183 goto failure;
4182 4184 }
4183 4185 }
4184 4186
4185 4187 svp->svp_new_path = 1;
4186 4188
4187 4189 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_init: path:%p\n",
4188 4190 (void *)pip));
4189 4191 return (MDI_SUCCESS);
4190 4192
4191 4193 failure:
4192 4194 if (psd) {
4193 4195 mutex_destroy(&psd->sd_mutex);
4194 4196 kmem_free(psd, sizeof (*psd));
4195 4197 }
4196 4198 if (svp) {
4197 4199 mdi_pi_set_vhci_private(pip, NULL);
4198 4200 mutex_destroy(&svp->svp_mutex);
4199 4201 cv_destroy(&svp->svp_cv);
4200 4202 kmem_free(svp, sizeof (*svp));
4201 4203 }
4202 4204 if (hba && (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE))
4203 4205 kmem_free(hba, sizeof (scsi_hba_tran_t));
4204 4206
4205 4207 if (vlun_alloced)
4206 4208 vhci_lun_free(vlun, NULL);
4207 4209
4208 4210 return (rval);
4209 4211 }
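/*
 * For reference, the consumer side of the SCSI_HBA_ADDR_COMPLEX plumbing
 * set up in vhci_pathinfo_init() above, as a minimal sketch (hypothetical
 * helper; the accessors are the 9F routines named in the comment above):
 * given a scsi_address(9S), an HBA driver can recover the scsi_device(9S)
 * and any private data it previously attached with
 * scsi_device_hba_private_set(9F).
 */
static void *
hba_priv_from_address(struct scsi_address *ap)
{
	struct scsi_device *sd = scsi_address_device(ap);

	return ((sd != NULL) ? scsi_device_hba_private_get(sd) : NULL);
}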
4210 4212
4211 4213 /* ARGSUSED */
4212 4214 static int
4213 4215 vhci_pathinfo_uninit(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
4214 4216 {
4215 4217 scsi_hba_tran_t *hba = NULL;
4216 4218 struct scsi_device *psd = NULL;
4217 4219 dev_info_t *pdip = NULL;
4218 4220 dev_info_t *cdip = NULL;
4219 4221 scsi_vhci_priv_t *svp = NULL;
4220 4222
4221 4223 ASSERT(vdip != NULL);
4222 4224 ASSERT(pip != NULL);
4223 4225
4224 4226 pdip = mdi_pi_get_phci(pip);
4225 4227 ASSERT(pdip != NULL);
4226 4228
4227 4229 cdip = mdi_pi_get_client(pip);
4228 4230 ASSERT(cdip != NULL);
4229 4231
4230 4232 hba = ddi_get_driver_private(pdip);
4231 4233 ASSERT(hba != NULL);
4232 4234
4233 4235 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_UNINIT);
4234 4236 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4235 4237 if (svp == NULL) {
4236 4238 /* path already freed. Nothing to do. */
4237 4239 return (MDI_SUCCESS);
4238 4240 }
4239 4241
4240 4242 psd = svp->svp_psd;
4241 4243 ASSERT(psd != NULL);
4242 4244
4243 4245 if (hba->tran_hba_flags & SCSI_HBA_ADDR_COMPLEX) {
4244 4246 /* Verify plumbing */
4245 4247 ASSERT(psd->sd_address.a_hba_tran == hba);
4246 4248 ASSERT(psd->sd_address.a.a_sd == psd);
4247 4249 } else if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4248 4250 /* Switch to cloned scsi_hba_tran(9S) structure */
4249 4251 hba = psd->sd_address.a_hba_tran;
4250 4252 ASSERT(hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE);
4251 4253 ASSERT(hba->tran_sd == psd);
4252 4254 }
4253 4255
4254 4256 if ((hba->tran_tgt_free != NULL) && !psd->sd_tran_tgt_free_done) {
4255 4257 (*hba->tran_tgt_free) (pdip, cdip, hba, psd);
4256 4258 psd->sd_tran_tgt_free_done = 1;
4257 4259 }
4258 4260 mutex_destroy(&psd->sd_mutex);
4259 4261 if (hba->tran_hba_flags & SCSI_HBA_TRAN_CLONE) {
4260 4262 kmem_free(hba, sizeof (*hba));
4261 4263 }
4262 4264
4263 4265 mdi_pi_set_vhci_private(pip, NULL);
4264 4266
4265 4267 /*
4266 4268 * Free the pathinfo related scsi_device inquiry data. Note that this
4267 4269 	 * matches what happens for the scsi_hba.c devinfo case at uninitchild time.
4268 4270 */
4269 4271 if (psd->sd_inq)
4270 4272 kmem_free((caddr_t)psd->sd_inq, sizeof (struct scsi_inquiry));
4271 4273 kmem_free((caddr_t)psd, sizeof (*psd));
4272 4274
4273 4275 mutex_destroy(&svp->svp_mutex);
4274 4276 cv_destroy(&svp->svp_cv);
4275 4277 kmem_free((caddr_t)svp, sizeof (*svp));
4276 4278
4277 4279 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_pathinfo_uninit: path=0x%p\n",
4278 4280 (void *)pip));
4279 4281 return (MDI_SUCCESS);
4280 4282 }
4281 4283
4282 4284 /* ARGSUSED */
4283 4285 static int
4284 4286 vhci_pathinfo_state_change(dev_info_t *vdip, mdi_pathinfo_t *pip,
4285 4287 mdi_pathinfo_state_t state, uint32_t ext_state, int flags)
4286 4288 {
4287 4289 int rval = MDI_SUCCESS;
4288 4290 scsi_vhci_priv_t *svp;
4289 4291 scsi_vhci_lun_t *vlun;
4290 4292 int held;
4291 4293 int op = (flags & 0xf00) >> 8;
4292 4294 struct scsi_vhci *vhci;
4293 4295
4294 4296 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
4295 4297
4296 4298 if (flags & MDI_EXT_STATE_CHANGE) {
4297 4299 /*
4298 4300 		 * We do not want to issue any commands down the path if the
4299 4301 		 * sync flag is set. Lower layers might not be ready to accept
4300 4302 * any I/O commands.
4301 4303 */
4302 4304 if (op == DRIVER_DISABLE)
4303 4305 return (MDI_SUCCESS);
4304 4306
4305 4307 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
4306 4308 if (svp == NULL) {
4307 4309 return (MDI_FAILURE);
4308 4310 }
4309 4311 vlun = svp->svp_svl;
4310 4312
4311 4313 if (flags & MDI_BEFORE_STATE_CHANGE) {
4312 4314 /*
4313 4315 * Hold the LUN.
4314 4316 */
4315 4317 VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
4316 4318 if (flags & MDI_DISABLE_OP) {
4317 4319 /*
4318 4320 * Issue scsi reset if it happens to be
4319 4321 * reserved path.
4320 4322 */
4321 4323 if (vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
4322 4324 /*
4323 4325 * if reservation pending on
4324 4326 				 * this path, don't mark the
4325 4327 * path busy
4326 4328 */
4327 4329 if (op == DRIVER_DISABLE_TRANSIENT) {
4328 4330 VHCI_DEBUG(1, (CE_NOTE, NULL,
4329 4331 "!vhci_pathinfo"
4330 4332 "_state_change (pip:%p): "
4331 4333 " reservation: fail busy\n",
4332 4334 (void *)pip));
4333 4335 return (MDI_FAILURE);
4334 4336 }
4335 4337 if (pip == vlun->svl_resrv_pip) {
4336 4338 if (vhci_recovery_reset(
4337 4339 svp->svp_svl,
4338 4340 &svp->svp_psd->sd_address,
4339 4341 TRUE,
4340 4342 VHCI_DEPTH_TARGET) == 0) {
4341 4343 VHCI_DEBUG(1,
4342 4344 (CE_NOTE, NULL,
4343 4345 "!vhci_pathinfo"
4344 4346 "_state_change "
4345 4347 " (pip:%p): "
4346 4348 "reset failed, "
4347 4349 "give up!\n",
4348 4350 (void *)pip));
4349 4351 }
4350 4352 vlun->svl_flags &=
4351 4353 ~VLUN_RESERVE_ACTIVE_FLG;
4352 4354 }
4353 4355 }
4354 4356 } else if (flags & MDI_ENABLE_OP) {
4355 4357 if (((vhci->vhci_conf_flags &
4356 4358 VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4357 4359 VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4358 4360 MDI_PI_IS_USER_DISABLE(pip) &&
4359 4361 MDI_PI_IS_STANDBY(pip)) {
4360 4362 struct scsi_failover_ops *fo;
4361 4363 char *best_pclass, *pclass = NULL;
4362 4364 int best_class, rv;
4363 4365 /*
4364 4366 * Failback if enabling a standby path
4365 4367 * and it is the primary class or
4366 4368 * preferred class
4367 4369 */
4368 4370 best_class = mdi_pi_get_preferred(pip);
4369 4371 if (best_class == 0) {
4370 4372 /*
4371 4373 * if not preferred - compare
4372 4374 * path-class with class
4373 4375 */
4374 4376 fo = vlun->svl_fops;
4375 4377 (void) fo->sfo_pathclass_next(
4376 4378 NULL, &best_pclass,
4377 4379 vlun->svl_fops_ctpriv);
4378 4380 pclass = NULL;
4379 4381 rv = mdi_prop_lookup_string(pip,
4380 4382 "path-class", &pclass);
4381 4383 if (rv != MDI_SUCCESS ||
4382 4384 pclass == NULL) {
4383 4385 vhci_log(CE_NOTE, vdip,
4384 4386 					    "!path-class "
4385 4387 					    "lookup "
4386 4388 					    "failed. rv: %d "
4387 4389 					    "class: %p", rv,
4388 4390 (void *)pclass);
4389 4391 } else if (strncmp(pclass,
4390 4392 best_pclass,
4391 4393 strlen(best_pclass)) == 0) {
4392 4394 best_class = 1;
4393 4395 }
4394 4396 if (rv == MDI_SUCCESS &&
4395 4397 pclass != NULL) {
4396 4398 rv = mdi_prop_free(
4397 4399 pclass);
4398 4400 if (rv !=
4399 4401 DDI_PROP_SUCCESS) {
4400 4402 vhci_log(
4401 4403 CE_NOTE,
4402 4404 vdip,
4403 4405 "!path-"
4404 4406 "class"
4405 4407 " free"
4406 4408 " failed"
4407 4409 " rv: %d"
4408 4410 " class: "
4409 4411 "%p",
4410 4412 rv,
4411 4413 (void *)
4412 4414 pclass);
4413 4415 }
4414 4416 }
4415 4417 }
4416 4418 if (best_class == 1) {
4417 4419 VHCI_DEBUG(1, (CE_NOTE, NULL,
4418 4420 "preferred path: %p "
4419 4421 "USER_DISABLE->USER_ENABLE "
4420 4422 "transition for lun %s\n",
4421 4423 (void *)pip,
4422 4424 vlun->svl_lun_wwn));
4423 4425 (void) taskq_dispatch(
4424 4426 vhci->vhci_taskq,
4425 4427 vhci_initiate_auto_failback,
4426 4428 (void *) vlun, KM_SLEEP);
4427 4429 }
4428 4430 }
4429 4431 /*
4430 4432 * if PGR is active, revalidate key and
4431 4433 * register on this path also, if key is
4432 4434 * still valid
4433 4435 */
4434 4436 sema_p(&vlun->svl_pgr_sema);
4435 4437 if (vlun->svl_pgr_active)
4436 4438 (void)
4437 4439 vhci_pgr_validate_and_register(svp);
4438 4440 sema_v(&vlun->svl_pgr_sema);
4439 4441 /*
4440 4442 * Inform target driver about any
4441 4443 * reservations to be reinstated if target
4442 4444 * has dropped reservation during the busy
4443 4445 * period.
4444 4446 */
4445 4447 mutex_enter(&vhci->vhci_mutex);
4446 4448 scsi_hba_reset_notify_callback(
4447 4449 &vhci->vhci_mutex,
4448 4450 &vhci->vhci_reset_notify_listf);
4449 4451 mutex_exit(&vhci->vhci_mutex);
4450 4452 }
4451 4453 }
4452 4454 if (flags & MDI_AFTER_STATE_CHANGE) {
4453 4455 if (flags & MDI_ENABLE_OP) {
4454 4456 mutex_enter(&vhci_global_mutex);
4455 4457 cv_broadcast(&vhci_cv);
4456 4458 mutex_exit(&vhci_global_mutex);
4457 4459 }
4458 4460 if (vlun->svl_setcap_done) {
4459 4461 (void) vhci_pHCI_cap(&svp->svp_psd->sd_address,
4460 4462 "sector-size", vlun->svl_sector_size,
4461 4463 1, pip);
4462 4464 }
4463 4465
4464 4466 /*
4465 4467 * Release the LUN
4466 4468 */
4467 4469 VHCI_RELEASE_LUN(vlun);
4468 4470
4469 4471 /*
4470 4472 * Path transition is complete.
4471 4473 			 * Run callback to prompt the target driver
4472 4474 			 * to retry, preventing IO starvation.
4473 4475 */
4474 4476 if (scsi_callback_id != 0) {
4475 4477 ddi_run_callback(&scsi_callback_id);
4476 4478 }
4477 4479 }
4478 4480 } else {
4479 4481 switch (state) {
4480 4482 case MDI_PATHINFO_STATE_ONLINE:
4481 4483 rval = vhci_pathinfo_online(vdip, pip, flags);
4482 4484 break;
4483 4485
4484 4486 case MDI_PATHINFO_STATE_OFFLINE:
4485 4487 rval = vhci_pathinfo_offline(vdip, pip, flags);
4486 4488 break;
4487 4489
4488 4490 default:
4489 4491 break;
4490 4492 }
4491 4493 /*
4492 4494 * Path transition is complete.
4493 4495 		 * Run callback to prompt the target driver
4494 4496 		 * to retry, preventing IO starvation.
4495 4497 */
4496 4498 if ((rval == MDI_SUCCESS) && (scsi_callback_id != 0)) {
4497 4499 ddi_run_callback(&scsi_callback_id);
4498 4500 }
4499 4501 return (rval);
4500 4502 }
4501 4503
4502 4504 return (MDI_SUCCESS);
4503 4505 }
4504 4506
4505 4507 /*
4506 4508 * Parse the mpxio load balancing options. The datanameptr
4507 4509 * will point to a string containing the load-balance-options value.
4508 4510 * The load-balance-options value will be a property that
4509 4511 * defines the load-balance algorithm and any arguments to that
4510 4512 * algorithm.
4511 4513 * For example:
4512 4514 * device-type-mpxio-options-list=
4513 4515 * "device-type=SUN SENA", "load-balance-options=logical-block-options"
4514 4516 * "device-type=SUN SE6920", "round-robin-options";
4515 4517 * logical-block-options="load-balance=logical-block", "region-size=15";
4516 4518 * round-robin-options="load-balance=round-robin";
4517 4519 *
4518 4520  * If load-balance is not defined, the load-balance algorithm will
4519 4521  * default to the global setting. Default values are assigned to the
4520 4522  * arguments (region-size=18), and any argument that is not
4521 4523  * known is ignored.
4522 4524 */
4523 4525 static void
4524 4526 vhci_parse_mpxio_lb_options(dev_info_t *dip, dev_info_t *cdip,
4525 4527 caddr_t datanameptr)
4526 4528 {
4527 4529 char *dataptr, *next_entry;
4528 4530 caddr_t config_list = NULL;
4529 4531 int config_list_len = 0, list_len = 0;
4530 4532 int region_size = -1;
4531 4533 	client_lb_t load_balance = LOAD_BALANCE_NONE;
4532 4534
4533 4535 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, datanameptr,
4534 4536 (caddr_t)&config_list, &config_list_len) != DDI_PROP_SUCCESS) {
4535 4537 return;
4536 4538 }
4537 4539
4538 4540 list_len = config_list_len;
4539 4541 next_entry = config_list;
4540 4542 while (config_list_len > 0) {
4541 4543 dataptr = next_entry;
4542 4544
4543 4545 if (strncmp(mdi_load_balance, dataptr,
4544 4546 strlen(mdi_load_balance)) == 0) {
4545 4547 /* get the load-balance scheme */
4546 4548 dataptr += strlen(mdi_load_balance) + 1;
4547 4549 if (strcmp(dataptr, LOAD_BALANCE_PROP_RR) == 0) {
4548 4550 (void) mdi_set_lb_policy(cdip, LOAD_BALANCE_RR);
4549 4551 load_balance = LOAD_BALANCE_RR;
4550 4552 } else if (strcmp(dataptr,
4551 4553 LOAD_BALANCE_PROP_LBA) == 0) {
4552 4554 (void) mdi_set_lb_policy(cdip,
4553 4555 LOAD_BALANCE_LBA);
4554 4556 load_balance = LOAD_BALANCE_LBA;
4555 4557 } else if (strcmp(dataptr,
4556 4558 LOAD_BALANCE_PROP_NONE) == 0) {
4557 4559 (void) mdi_set_lb_policy(cdip,
4558 4560 LOAD_BALANCE_NONE);
4559 4561 load_balance = LOAD_BALANCE_NONE;
4560 4562 }
4561 4563 } else if (strncmp(dataptr, LOGICAL_BLOCK_REGION_SIZE,
4562 4564 strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) {
4563 4565 int i = 0;
4564 4566 char *ptr;
4565 4567 char *tmp;
4566 4568
4567 4569 tmp = dataptr + (strlen(LOGICAL_BLOCK_REGION_SIZE) + 1);
4568 4570 /* check for numeric value */
4569 4571 for (ptr = tmp; i < strlen(tmp); i++, ptr++) {
4570 4572 if (!isdigit(*ptr)) {
4571 4573 cmn_err(CE_WARN,
4572 4574 "Illegal region size: %s."
4573 4575 " Setting to default value: %d",
4574 4576 tmp,
4575 4577 LOAD_BALANCE_DEFAULT_REGION_SIZE);
4576 4578 region_size =
4577 4579 LOAD_BALANCE_DEFAULT_REGION_SIZE;
4578 4580 break;
4579 4581 }
4580 4582 }
4581 4583 if (i >= strlen(tmp)) {
4582 4584 region_size = stoi(&tmp);
4583 4585 }
4584 4586 (void) mdi_set_lb_region_size(cdip, region_size);
4585 4587 }
4586 4588 config_list_len -= (strlen(next_entry) + 1);
4587 4589 next_entry += strlen(next_entry) + 1;
4588 4590 }
4589 4591 #ifdef DEBUG
4590 4592 if ((region_size >= 0) && (load_balance != LOAD_BALANCE_LBA)) {
4591 4593 VHCI_DEBUG(1, (CE_NOTE, dip,
4592 4594 "!vhci_parse_mpxio_lb_options: region-size: %d"
4593 4595 "only valid for load-balance=logical-block\n",
4594 4596 region_size));
4595 4597 }
4596 4598 #endif
4597 4599 if ((region_size == -1) && (load_balance == LOAD_BALANCE_LBA)) {
4598 4600 VHCI_DEBUG(1, (CE_NOTE, dip,
4599 4601 "!vhci_parse_mpxio_lb_options: No region-size"
4600 4602 " defined load-balance=logical-block."
4601 4603 " Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE));
4602 4604 (void) mdi_set_lb_region_size(cdip,
4603 4605 LOAD_BALANCE_DEFAULT_REGION_SIZE);
4604 4606 }
4605 4607 if (list_len > 0) {
4606 4608 kmem_free(config_list, list_len);
4607 4609 }
4608 4610 }
4609 4611
4610 4612 /*
4611 4613 * Parse the device-type-mpxio-options-list looking for the key of
4612 4614 * "load-balance-options". If found, parse the load balancing options.
4613 4615  * See the comment above vhci_get_device_type_mpxio_options()
4614 4616  * for the format of the device-type-mpxio-options-list.
4615 4617 */
4616 4618 static void
4617 4619 vhci_parse_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4618 4620 caddr_t datanameptr, int list_len)
4619 4621 {
4620 4622 char *dataptr;
4621 4623 int len;
4622 4624
4623 4625 /*
4624 4626 * get the data list
4625 4627 */
4626 4628 dataptr = datanameptr;
4627 4629 len = 0;
4628 4630 while (len < list_len &&
4629 4631 strncmp(dataptr, DEVICE_TYPE_STR, strlen(DEVICE_TYPE_STR))
4630 4632 != 0) {
4631 4633 if (strncmp(dataptr, LOAD_BALANCE_OPTIONS,
4632 4634 strlen(LOAD_BALANCE_OPTIONS)) == 0) {
4633 4635 len += strlen(LOAD_BALANCE_OPTIONS) + 1;
4634 4636 dataptr += strlen(LOAD_BALANCE_OPTIONS) + 1;
4635 4637 vhci_parse_mpxio_lb_options(dip, cdip, dataptr);
4636 4638 }
4637 4639 len += strlen(dataptr) + 1;
4638 4640 dataptr += strlen(dataptr) + 1;
4639 4641 }
4640 4642 }
4641 4643
4642 4644 /*
4643 4645  * Compare the inquiry string returned from the device with the device-type.
4644 4646 * Check for the existence of the device-type-mpxio-options-list and
4645 4647 * if found parse the list checking for a match with the device-type
4646 4648 * value and the inquiry string returned from the device. If a match
4647 4649 * is found, parse the mpxio options list. The format of the
4648 4650 * device-type-mpxio-options-list is:
4649 4651 * device-type-mpxio-options-list=
4650 4652 * "device-type=SUN SENA", "load-balance-options=logical-block-options"
4651 4653 * "device-type=SUN SE6920", "round-robin-options";
4652 4654 * logical-block-options="load-balance=logical-block", "region-size=15";
4653 4655 * round-robin-options="load-balance=round-robin";
4654 4656 */
4655 4657 void
4656 4658 vhci_get_device_type_mpxio_options(dev_info_t *dip, dev_info_t *cdip,
4657 4659 struct scsi_device *devp)
4658 4660 {
4659 4661
4660 4662 caddr_t config_list = NULL;
4661 4663 caddr_t vidptr, datanameptr;
4662 4664 int vidlen, dupletlen = 0;
4663 4665 int config_list_len = 0, len;
4664 4666 struct scsi_inquiry *inq = devp->sd_inq;
4665 4667
4666 4668 /*
4667 4669 	 * Look up the device-type-mpxio-options-list and walk through
4668 4670 	 * the list, comparing the vendor id from the earlier inquiry
4669 4671 	 * command with the vids in the list; if there is a match, look
4670 4672 	 * up the mpxio-options value.
4671 4673 */
4672 4674 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
4673 4675 MPXIO_OPTIONS_LIST,
4674 4676 (caddr_t)&config_list, &config_list_len) == DDI_PROP_SUCCESS) {
4675 4677
4676 4678 /*
4677 4679 * Compare vids in each duplet - if it matches,
4678 4680 * parse the mpxio options list.
4679 4681 */
4680 4682 for (len = config_list_len, vidptr = config_list; len > 0;
4681 4683 len -= dupletlen) {
4682 4684
4683 4685 dupletlen = 0;
4684 4686
4685 4687 if (strlen(vidptr) != 0 &&
4686 4688 strncmp(vidptr, DEVICE_TYPE_STR,
4687 4689 strlen(DEVICE_TYPE_STR)) == 0) {
4688 4690 /* point to next duplet */
4689 4691 datanameptr = vidptr + strlen(vidptr) + 1;
4690 4692 /* add len of this duplet */
4691 4693 dupletlen += strlen(vidptr) + 1;
4692 4694 /* get to device type */
4693 4695 vidptr += strlen(DEVICE_TYPE_STR) + 1;
4694 4696 vidlen = strlen(vidptr);
4695 4697 if ((vidlen != 0) &&
4696 4698 bcmp(inq->inq_vid, vidptr, vidlen) == 0) {
4697 4699 vhci_parse_mpxio_options(dip, cdip,
4698 4700 datanameptr, len - dupletlen);
4699 4701 break;
4700 4702 }
4701 4703 /* get to next duplet */
4702 4704 vidptr += strlen(vidptr) + 1;
4703 4705 }
4704 4706 /* get to the next device-type */
4705 4707 while (len - dupletlen > 0 &&
4706 4708 strlen(vidptr) != 0 &&
4707 4709 strncmp(vidptr, DEVICE_TYPE_STR,
4708 4710 strlen(DEVICE_TYPE_STR)) != 0) {
4709 4711 dupletlen += strlen(vidptr) + 1;
4710 4712 vidptr += strlen(vidptr) + 1;
4711 4713 }
4712 4714 }
4713 4715 if (config_list_len > 0) {
4714 4716 kmem_free(config_list, config_list_len);
4715 4717 }
4716 4718 }
4717 4719 }
4718 4720
4719 4721 static int
4720 4722 vhci_update_pathinfo(struct scsi_device *psd, mdi_pathinfo_t *pip,
4721 4723 struct scsi_failover_ops *fo,
4722 4724 scsi_vhci_lun_t *vlun,
4723 4725 struct scsi_vhci *vhci)
4724 4726 {
4725 4727 struct scsi_path_opinfo opinfo;
4726 4728 char *pclass, *best_pclass;
4727 4729 char *resrv_pclass = NULL;
4728 4730 int force_rereserve = 0;
4729 4731 int update_pathinfo_done = 0;
4730 4732
4731 4733 if (fo->sfo_path_get_opinfo(psd, &opinfo, vlun->svl_fops_ctpriv) != 0) {
4732 4734 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_update_pathinfo: "
4733 4735 "Failed to get operation info for path:%p\n", (void *)pip));
4734 4736 return (MDI_FAILURE);
4735 4737 }
4736 4738 /* set the xlf capable flag in the vlun for future use */
4737 4739 vlun->svl_xlf_capable = opinfo.opinfo_xlf_capable;
4738 4740 (void) mdi_prop_update_string(pip, "path-class",
4739 4741 opinfo.opinfo_path_attr);
4740 4742
4741 4743 pclass = opinfo.opinfo_path_attr;
4742 4744 if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE) {
4743 4745 mutex_enter(&vlun->svl_mutex);
4744 4746 if (vlun->svl_active_pclass != NULL) {
4745 4747 if (strcmp(vlun->svl_active_pclass, pclass) != 0) {
4746 4748 mutex_exit(&vlun->svl_mutex);
4747 4749 /*
4748 4750 * Externally initiated failover has happened;
4749 4751 * force the path state to be STANDBY/ONLINE,
4750 4752 			 * sync-up the pathstates. The reason we don't
4751 4753 			 * sync-up immediately by invoking
4752 4754 			 * vhci_update_pathstates() is that it
4753 4755 * vhci_update_pathstates() is because it
4754 4756 * needs a VHCI_HOLD_LUN() and we don't
4755 4757 * want to block here.
4756 4758 *
4757 4759 * Further, if the device is an ALUA device,
4758 4760 * then failure to exactly match 'pclass' and
4759 4761 * 'svl_active_pclass'(as is the case here)
4760 4762 * indicates that the currently active path
4761 4763 * is a 'non-optimized' path - which means
4762 4764 * that 'svl_active_pclass' needs to be
4763 4765 			 * replaced with the opinfo.opinfo_path_attr
4764 4766 			 * value.
4765 4767 */
4766 4768
4767 4769 if (SCSI_FAILOVER_IS_TPGS(vlun->svl_fops)) {
4768 4770 char *tptr;
4769 4771
4770 4772 /*
4771 4773 * The device is ALUA compliant. The
4772 4774 				 * The device is ALUA compliant. The
4773 4775 				 * state needs to be changed to online
4774 4776 				 * rather than standby, which is what
4775 4777 				 * is typically done for an asymmetric
4776 4778 				 * device that is not ALUA compliant.
4777 4779 mdi_pi_set_state(pip,
4778 4780 MDI_PATHINFO_STATE_ONLINE);
4779 4781 tptr = kmem_alloc(strlen
4780 4782 (opinfo.opinfo_path_attr)+1,
4781 4783 KM_SLEEP);
4782 4784 (void) strlcpy(tptr,
4783 4785 opinfo.opinfo_path_attr,
4784 4786 (strlen(opinfo.opinfo_path_attr)
4785 4787 +1));
4786 4788 mutex_enter(&vlun->svl_mutex);
4787 4789 kmem_free(vlun->svl_active_pclass,
4788 4790 strlen(vlun->svl_active_pclass)+1);
4789 4791 vlun->svl_active_pclass = tptr;
4790 4792 mutex_exit(&vlun->svl_mutex);
4791 4793 } else {
4792 4794 /*
4793 4795 * Non ALUA device case.
4794 4796 */
4795 4797 mdi_pi_set_state(pip,
4796 4798 MDI_PATHINFO_STATE_STANDBY);
4797 4799 }
4798 4800 vlun->svl_fo_support = opinfo.opinfo_mode;
4799 4801 mdi_pi_set_preferred(pip,
4800 4802 opinfo.opinfo_preferred);
4801 4803 update_pathinfo_done = 1;
4802 4804 }
4803 4805
4804 4806 /*
4805 4807 			 * Find out the class of the currently reserved path,
4806 4808 			 * if there is one.
4807 4809 */
4808 4810 if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) &&
4809 4811 mdi_prop_lookup_string(vlun->svl_resrv_pip,
4810 4812 "path-class", &resrv_pclass) != MDI_SUCCESS) {
4811 4813 VHCI_DEBUG(1, (CE_NOTE, NULL,
4812 4814 "!vhci_update_pathinfo: prop lookup "
4813 4815 "failed for path 0x%p\n",
4814 4816 (void *)vlun->svl_resrv_pip));
4815 4817 /*
4816 4818 * Something is wrong with the reserved path.
4817 4819 * We can't do much with that right here. Just
4818 4820 * force re-reservation to another path.
4819 4821 */
4820 4822 force_rereserve = 1;
4821 4823 }
4822 4824
4823 4825 (void) fo->sfo_pathclass_next(NULL, &best_pclass,
4824 4826 vlun->svl_fops_ctpriv);
4825 4827 if ((force_rereserve == 1) || ((resrv_pclass != NULL) &&
4826 4828 (strcmp(pclass, best_pclass) == 0) &&
4827 4829 (strcmp(resrv_pclass, best_pclass) != 0))) {
4828 4830 /*
4829 4831 * Inform target driver that a reservation
4830 4832 * should be reinstated because the reserved
4831 4833 * path is not the most preferred one.
4832 4834 */
4833 4835 mutex_enter(&vhci->vhci_mutex);
4834 4836 scsi_hba_reset_notify_callback(
4835 4837 &vhci->vhci_mutex,
4836 4838 &vhci->vhci_reset_notify_listf);
4837 4839 mutex_exit(&vhci->vhci_mutex);
4838 4840 }
4839 4841
4840 4842 if (update_pathinfo_done == 1) {
4841 4843 return (MDI_SUCCESS);
4842 4844 }
4843 4845 } else {
4844 4846 char *tptr;
4845 4847
4846 4848 			/*
4847 4849 			 * Release the mutex before we try to
4848 4850 			 * allocate, since the allocation may
4849 4851 			 * sleep.
4850 4852 			 */
4851 4853 mutex_exit(&vlun->svl_mutex);
4852 4854 tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
4853 4855 (void) strlcpy(tptr, pclass, (strlen(pclass)+1));
4854 4856 mutex_enter(&vlun->svl_mutex);
4855 4857 vlun->svl_active_pclass = tptr;
4856 4858 }
4857 4859 mutex_exit(&vlun->svl_mutex);
4858 4860 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4859 4861 vlun->svl_waiting_for_activepath = 0;
4860 4862 } else if (opinfo.opinfo_path_state == SCSI_PATH_ACTIVE_NONOPT) {
4861 4863 mutex_enter(&vlun->svl_mutex);
4862 4864 if (vlun->svl_active_pclass == NULL) {
4863 4865 char *tptr;
4864 4866
4865 4867 mutex_exit(&vlun->svl_mutex);
4866 4868 tptr = kmem_alloc(strlen(pclass)+1, KM_SLEEP);
4867 4869 (void) strlcpy(tptr, pclass, (strlen(pclass)+1));
4868 4870 mutex_enter(&vlun->svl_mutex);
4869 4871 vlun->svl_active_pclass = tptr;
4870 4872 }
4871 4873 mutex_exit(&vlun->svl_mutex);
4872 4874 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_ONLINE);
4873 4875 vlun->svl_waiting_for_activepath = 0;
4874 4876 } else if (opinfo.opinfo_path_state == SCSI_PATH_INACTIVE) {
4875 4877 mutex_enter(&vlun->svl_mutex);
4876 4878 if (vlun->svl_active_pclass != NULL) {
4877 4879 if (strcmp(vlun->svl_active_pclass, pclass) == 0) {
4878 4880 mutex_exit(&vlun->svl_mutex);
4879 4881 /*
4880 4882 * externally initiated failover has happened;
4881 4883 * force state to ONLINE (see comment above)
4882 4884 */
4883 4885 mdi_pi_set_state(pip,
4884 4886 MDI_PATHINFO_STATE_ONLINE);
4885 4887 vlun->svl_fo_support = opinfo.opinfo_mode;
4886 4888 mdi_pi_set_preferred(pip,
4887 4889 opinfo.opinfo_preferred);
4888 4890 return (MDI_SUCCESS);
4889 4891 }
4890 4892 }
4891 4893 mutex_exit(&vlun->svl_mutex);
4892 4894 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_STANDBY);
4893 4895
4894 4896 		/*
4895 4897 		 * Initiate auto-failback, if enabled, for the path if the
4896 4898 		 * path-state is transitioning from OFFLINE->STANDBY and the
4897 4899 		 * pathclass is the preferred pathclass for this storage.
4898 4900 		 * NOTE: In the case where opinfo_path_state is SCSI_PATH_ACTIVE
4899 4901 		 * (above) and the pi state is set to STANDBY, we don't
4900 4902 		 * initiate auto-failback, as the next IO will take care of
4901 4903 		 * this. See comment above.
4902 4904 		 */
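		/*
		 * Three conditions are checked below: auto-failback is
		 * enabled in the vhci configuration, this path's class is
		 * the preferred (best) class for the device, and the path
		 * is coming out of OFFLINE (or INIT).
		 */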
4903 4905 (void) fo->sfo_pathclass_next(NULL, &best_pclass,
4904 4906 vlun->svl_fops_ctpriv);
4905 4907 if (((vhci->vhci_conf_flags & VHCI_CONF_FLAGS_AUTO_FAILBACK) ==
4906 4908 VHCI_CONF_FLAGS_AUTO_FAILBACK) &&
4907 4909 (strcmp(pclass, best_pclass) == 0) &&
4908 4910 ((MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_OFFLINE)||
4909 4911 (MDI_PI_OLD_STATE(pip) == MDI_PATHINFO_STATE_INIT))) {
4910 4912 VHCI_DEBUG(1, (CE_NOTE, NULL, "%s pathclass path: %p"
4911 4913 " OFFLINE->STANDBY transition for lun %s\n",
4912 4914 best_pclass, (void *)pip, vlun->svl_lun_wwn));
4913 4915 (void) taskq_dispatch(vhci->vhci_taskq,
4914 4916 vhci_initiate_auto_failback, (void *) vlun,
4915 4917 KM_SLEEP);
4916 4918 }
4917 4919 }
4918 4920 vlun->svl_fo_support = opinfo.opinfo_mode;
4919 4921 mdi_pi_set_preferred(pip, opinfo.opinfo_preferred);
4920 4922
4921 4923 VHCI_DEBUG(8, (CE_NOTE, NULL, "vhci_update_pathinfo: opinfo_rev = %x,"
4922 4924 " opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n",
4923 4925 opinfo.opinfo_rev, opinfo.opinfo_path_state,
4924 4926 opinfo.opinfo_preferred, opinfo.opinfo_mode));
4925 4927
4926 4928 return (MDI_SUCCESS);
4927 4929 }
4928 4930
4929 4931 /*
4930 4932  * Form the kstat name and call mdi_pi_kstat_create()
4931 4933 */
4932 4934 void
4933 4935 vhci_kstat_create_pathinfo(mdi_pathinfo_t *pip)
4934 4936 {
4935 4937 dev_info_t *tgt_dip;
4936 4938 dev_info_t *pdip;
4937 4939 char *guid;
4938 4940 char *target_port, *target_port_dup;
4939 4941 char ks_name[KSTAT_STRLEN];
4940 4942 uint_t pid;
4941 4943 int by_id;
4942 4944 mod_hash_val_t hv;
4943 4945
4944 4946
4945 4947 /* return if we have already allocated kstats */
4946 4948 if (mdi_pi_kstat_exists(pip))
4947 4949 return;
4948 4950
4949 4951 /*
4950 4952 	 * We need instance numbers to create a kstat name; return if we don't
4951 4953 * have instance numbers assigned yet.
4952 4954 */
4953 4955 tgt_dip = mdi_pi_get_client(pip);
4954 4956 pdip = mdi_pi_get_phci(pip);
4955 4957 if ((ddi_get_instance(tgt_dip) == -1) || (ddi_get_instance(pdip) == -1))
4956 4958 return;
4957 4959
4958 4960 /*
4959 4961 * A path oriented kstat has a ks_name of the form:
4960 4962 *
4961 4963 * <client-driver><instance>.t<pid>.<pHCI-driver><instance>
4962 4964 *
4963 4965 * We maintain a bidirectional 'target-port' to <pid> map,
4964 4966 * called targetmap. All pathinfo nodes with the same
4965 4967 * 'target-port' map to the same <pid>. The iostat(1M) code,
4966 4968 * when parsing a path oriented kstat name, uses the <pid> as
4967 4969 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order
4968 4970 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats,
4969 4971 * this ioctl needs to translate a <pid> to a 'target-port'
4970 4972 * even after all pathinfo nodes associated with the
4971 4973 * 'target-port' have been destroyed. This is needed to support
4972 4974 * consistent first-iteration activity-since-boot iostat(1M)
4973 4975 * output. Because of this requirement, the mapping can't be
4974 4976 * based on pathinfo information in a devinfo snapshot.
4975 4977 */
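	/*
	 * A sketch of the resulting name, using hypothetical driver
	 * names and instance numbers (not taken from any real
	 * configuration):
	 *
	 *	(void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d",
	 *	    "ssd", 3, 0, "fp", 1);
	 *
	 * yields the path-oriented kstat name "ssd3.t0.fp1", i.e.
	 * client ssd3 reached via target-port <pid> 0 on pHCI fp1.
	 */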
4976 4978
4977 4979 /* determine 'target-port' */
4978 4980 if (mdi_prop_lookup_string(pip,
4979 4981 SCSI_ADDR_PROP_TARGET_PORT, &target_port) == MDI_SUCCESS) {
4980 4982 target_port_dup = i_ddi_strdup(target_port, KM_SLEEP);
4981 4983 (void) mdi_prop_free(target_port);
4982 4984 by_id = 1;
4983 4985 } else {
4984 4986 /*
4985 4987 * If the pHCI did not set up 'target-port' on this
4986 4988 * pathinfo node, assume that our client is the only
4987 4989 * one with paths to the device by using the guid
4988 4990 * value as the 'target-port'. Since no other client
4989 4991 * will have the same guid, no other client will use
4990 4992 * the same <pid>. NOTE: a client with an instance
4991 4993 * number always has a guid.
4992 4994 */
4993 4995 (void) ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
4994 4996 PROPFLAGS, MDI_CLIENT_GUID_PROP, &guid);
4995 4997 target_port_dup = i_ddi_strdup(guid, KM_SLEEP);
4996 4998 ddi_prop_free(guid);
4997 4999
4998 5000 /*
4999 5001 * For this type of mapping we don't want the
5000 5002 		 * <pid> -> 'target-port' mapping to be made. This
5001 5003 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl
5002 5004 * to fail, and the iostat(1M) long '-n' output will
5003 5005 * still use the <pid>. We do this because we just
5004 5006 * made up the 'target-port' using the guid, and we
5005 5007 * don't want to expose that fact in iostat output.
5006 5008 */
5007 5009 by_id = 0;
5008 5010 }
5009 5011
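	/*
	 * The code below maintains the targetmap as a pair of mod_hash
	 * tables that together form the bidirectional map described
	 * above:
	 *
	 *	vhci_targetmap_byport:	'target-port' string -> <pid>
	 *	vhci_targetmap_bypid:	<pid> -> 'target-port' string
	 */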
5010 5012 /* find/establish <pid> given 'target-port' */
5011 5013 mutex_enter(&vhci_targetmap_mutex);
5012 5014 if (mod_hash_find(vhci_targetmap_byport,
5013 5015 (mod_hash_key_t)target_port_dup, &hv) == 0) {
5014 5016 pid = (int)(intptr_t)hv; /* mapping exists */
5015 5017 } else {
5016 5018 pid = vhci_targetmap_pid++; /* new mapping */
5017 5019
5018 5020 (void) mod_hash_insert(vhci_targetmap_byport,
5019 5021 (mod_hash_key_t)target_port_dup,
5020 5022 (mod_hash_val_t)(intptr_t)pid);
5021 5023 if (by_id) {
5022 5024 (void) mod_hash_insert(vhci_targetmap_bypid,
5023 5025 (mod_hash_key_t)(uintptr_t)pid,
5024 5026 (mod_hash_val_t)(uintptr_t)target_port_dup);
5025 5027 }
5026 5028 target_port_dup = NULL; /* owned by hash */
5027 5029 }
5028 5030 mutex_exit(&vhci_targetmap_mutex);
5029 5031
5030 5032 /* form kstat name */
5031 5033 (void) snprintf(ks_name, KSTAT_STRLEN, "%s%d.t%d.%s%d",
5032 5034 ddi_driver_name(tgt_dip), ddi_get_instance(tgt_dip),
5033 5035 pid, ddi_driver_name(pdip), ddi_get_instance(pdip));
5034 5036
5035 5037 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p "
5036 5038 "kstat %s: pid %x <-> port %s\n", (void *)pip,
5037 5039 ks_name, pid, target_port_dup));
5038 5040 if (target_port_dup)
5039 5041 kmem_free(target_port_dup, strlen(target_port_dup) + 1);
5040 5042
5041 5043 /* call mdi to create kstats with the name we built */
5042 5044 (void) mdi_pi_kstat_create(pip, ks_name);
5043 5045 }
5044 5046
5045 5047 /* ARGSUSED */
5046 5048 static int
5047 5049 vhci_pathinfo_online(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
5048 5050 {
5049 5051 scsi_hba_tran_t *hba = NULL;
5050 5052 struct scsi_device *psd = NULL;
5051 5053 scsi_vhci_lun_t *vlun = NULL;
5052 5054 dev_info_t *pdip = NULL;
5053 5055 dev_info_t *cdip;
5054 5056 dev_info_t *tgt_dip;
5055 5057 struct scsi_vhci *vhci;
5056 5058 char *guid;
5057 5059 struct scsi_failover_ops *sfo;
5058 5060 scsi_vhci_priv_t *svp = NULL;
5059 5061 struct scsi_address *ap;
5060 5062 struct scsi_pkt *pkt;
5061 5063 int rval = MDI_FAILURE;
5062 5064 mpapi_item_list_t *list_ptr;
5063 5065 mpapi_lu_data_t *ld;
5064 5066
5065 5067 ASSERT(vdip != NULL);
5066 5068 ASSERT(pip != NULL);
5067 5069
5068 5070 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
5069 5071 ASSERT(vhci != NULL);
5070 5072
5071 5073 pdip = mdi_pi_get_phci(pip);
5072 5074 hba = ddi_get_driver_private(pdip);
5073 5075 ASSERT(hba != NULL);
5074 5076
5075 5077 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5076 5078 ASSERT(svp != NULL);
5077 5079
5078 5080 cdip = mdi_pi_get_client(pip);
5079 5081 ASSERT(cdip != NULL);
5080 5082 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip, PROPFLAGS,
5081 5083 MDI_CLIENT_GUID_PROP, &guid) != DDI_SUCCESS) {
5082 5084 VHCI_DEBUG(1, (CE_WARN, NULL, "vhci_path_online: lun guid "
5083 5085 "property failed"));
5084 5086 goto failure;
5085 5087 }
5086 5088
5087 5089 vlun = vhci_lun_lookup(cdip);
5088 5090 ASSERT(vlun != NULL);
5089 5091
5090 5092 ddi_prop_free(guid);
5091 5093
5092 5094 vlun->svl_dip = mdi_pi_get_client(pip);
5093 5095 ASSERT(vlun->svl_dip != NULL);
5094 5096
5095 5097 psd = svp->svp_psd;
5096 5098 ASSERT(psd != NULL);
5097 5099
5098 5100 ap = &psd->sd_address;
5099 5101
5100 5102 /*
5101 5103 * Get inquiry data into pathinfo related scsi_device structure.
5102 5104 	 * Free sd_inq when pathinfo related scsi_device structure is destroyed
5103 5105 * by vhci_pathinfo_uninit(). In other words, vhci maintains its own
5104 5106 * copy of scsi_device and scsi_inquiry data on a per-path basis.
5105 5107 */
5106 5108 if (scsi_probe(psd, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
5107 5109 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_pathinfo_online: "
5108 5110 "scsi_probe failed path:%p rval:%x\n", (void *)pip, rval));
5109 5111 rval = MDI_FAILURE;
5110 5112 goto failure;
5111 5113 }
5112 5114
5113 5115 /*
5114 5116 * See if we have a failover module to support the device.
5115 5117 *
5116 5118 * We re-probe to determine the failover ops for each path. This
5117 5119 * is done in case there are any path-specific side-effects associated
5118 5120 * with the sfo_device_probe implementation.
5119 5121 *
5120 5122 	 * Give the first successful sfo_device_probe the opportunity to
5121 5123 * establish 'ctpriv', vlun/client private data. The ctpriv will
5122 5124 * then be passed into the failover module on all other sfo_device_*()
5123 5125 * operations (and must be freed by sfo_device_unprobe implementation).
5124 5126 *
5125 5127 * NOTE: While sfo_device_probe is done once per path,
5126 5128 * sfo_device_unprobe only occurs once - when the vlun is destroyed.
5127 5129 *
5128 5130 * NOTE: We don't currently support per-path fops private data
5129 5131 * mechanism.
5130 5132 */
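	/*
	 * The check that follows rejects three contradictory probe
	 * outcomes: a failover module different from the one already
	 * bound to the vlun, a module found for a vlun already marked
	 * not-supported, and no module found for a vlun that already
	 * has one.
	 */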
5131 5133 sfo = vhci_dev_fo(vdip, psd,
5132 5134 &vlun->svl_fops_ctpriv, &vlun->svl_fops_name);
5133 5135
5134 5136 /* check path configuration result with current vlun state */
5135 5137 if (((sfo && vlun->svl_fops) && (sfo != vlun->svl_fops)) ||
5136 5138 (sfo && vlun->svl_not_supported) ||
5137 5139 ((sfo == NULL) && vlun->svl_fops)) {
5138 5140 /* Getting different results for different paths. */
5139 5141 VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
5140 5142 "!vhci_pathinfo_online: dev (path 0x%p) contradiction\n",
5141 5143 (void *)pip));
5142 5144 cmn_err(CE_WARN, "scsi_vhci: failover contradiction: "
5143 5145 "'%s'.vs.'%s': path %s\n",
5144 5146 vlun->svl_fops ? vlun->svl_fops->sfo_name : "NULL",
5145 5147 sfo ? sfo->sfo_name : "NULL", mdi_pi_pathname(pip));
5146 5148 vlun->svl_not_supported = 1;
5147 5149 rval = MDI_NOT_SUPPORTED;
5148 5150 goto done;
5149 5151 } else if (sfo == NULL) {
5150 5152 /* No failover module - device not supported under vHCI. */
5151 5153 VHCI_DEBUG(1, (CE_NOTE, vhci->vhci_dip,
5152 5154 "!vhci_pathinfo_online: dev (path 0x%p) not "
5153 5155 "supported\n", (void *)pip));
5154 5156
5155 5157 /* XXX does this contradict vhci_is_dev_supported ? */
5156 5158 vlun->svl_not_supported = 1;
5157 5159 rval = MDI_NOT_SUPPORTED;
5158 5160 goto done;
5159 5161 }
5160 5162
5161 5163 /* failover supported for device - save failover_ops in vlun */
5162 5164 vlun->svl_fops = sfo;
5163 5165 ASSERT(vlun->svl_fops_name != NULL);
5164 5166
5165 5167 /*
5166 5168 * Obtain the device-type based mpxio options as specified in
5167 5169 * scsi_vhci.conf file.
5168 5170 *
5169 5171 * NOTE: currently, the end result is a call to
5170 5172 * mdi_set_lb_region_size().
5171 5173 */
5172 5174 tgt_dip = psd->sd_dev;
5173 5175 ASSERT(tgt_dip != NULL);
5174 5176 vhci_get_device_type_mpxio_options(vdip, tgt_dip, psd);
5175 5177
5176 5178 	/*
5177 5179 	 * If PGR is active, revalidate the key and register on this path
5178 5180 	 * as well, provided the key is still valid.
5179 5181 	 */
5180 5182 sema_p(&vlun->svl_pgr_sema);
5181 5183 if (vlun->svl_pgr_active) {
5182 5184 rval = vhci_pgr_validate_and_register(svp);
5183 5185 if (rval != 1) {
5184 5186 rval = MDI_FAILURE;
5185 5187 sema_v(&vlun->svl_pgr_sema);
5186 5188 goto failure;
5187 5189 }
5188 5190 }
5189 5191 sema_v(&vlun->svl_pgr_sema);
5190 5192
5191 5193 if (svp->svp_new_path) {
5192 5194 /*
5193 5195 * Last chance to perform any cleanup operations on this
5194 5196 * new path before making this path completely online.
5195 5197 */
5196 5198 svp->svp_new_path = 0;
5197 5199
5198 5200 		/*
5199 5201 		 * If scsi_vhci knows the lun is already RESERVE'd,
5200 5202 		 * then skip issuing RELEASE on the new path.
5201 5203 		 */
5202 5204 if ((vlun->svl_flags & VLUN_RESERVE_ACTIVE_FLG) == 0) {
5203 5205 /*
5204 5206 * Issue SCSI-2 RELEASE only for the first time on
5205 5207 * a new path just in case the host rebooted and
5206 5208 * a reservation is still pending on this path.
5207 5209 * IBM Shark storage does not clear RESERVE upon
5208 5210 * host reboot.
5209 5211 */
5210 5212 pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0,
5211 5213 sizeof (struct scsi_arq_status), 0, 0,
5212 5214 SLEEP_FUNC, NULL);
5213 5215 if (pkt == NULL) {
5214 5216 VHCI_DEBUG(1, (CE_NOTE, NULL,
5215 5217 "!vhci_pathinfo_online: "
5216 5218 				    "Release init_pkt failed: %p\n",
5217 5219 (void *)pip));
5218 5220 rval = MDI_FAILURE;
5219 5221 goto failure;
5220 5222 }
5221 5223 pkt->pkt_cdbp[0] = SCMD_RELEASE;
5222 5224 pkt->pkt_time = 60;
5223 5225
5224 5226 VHCI_DEBUG(1, (CE_NOTE, NULL,
5225 5227 "!vhci_path_online: path:%p "
5226 5228 "Issued SCSI-2 RELEASE\n", (void *)pip));
5227 5229
5228 5230 /* Ignore the return value */
5229 5231 (void) vhci_do_scsi_cmd(pkt);
5230 5232 scsi_destroy_pkt(pkt);
5231 5233 }
5232 5234 }
5233 5235
5234 5236 rval = vhci_update_pathinfo(psd, pip, sfo, vlun, vhci);
5235 5237 if (rval == MDI_FAILURE) {
5236 5238 goto failure;
5237 5239 }
5238 5240
5239 5241 /* Initialize MP-API data */
5240 5242 vhci_update_mpapi_data(vhci, vlun, pip);
5241 5243
5242 5244 /*
5243 5245 * MP-API also needs the Inquiry data to be maintained in the
5244 5246 * mp_vendor_prop_t structure, so find the lun and update its
5245 5247 * structure with this data.
5246 5248 */
5247 5249 list_ptr = (mpapi_item_list_t *)vhci_get_mpapi_item(vhci, NULL,
5248 5250 MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun);
5249 5251 ld = (mpapi_lu_data_t *)list_ptr->item->idata;
5250 5252 if (ld != NULL) {
5251 5253 bcopy(psd->sd_inq->inq_vid, ld->prop.prodInfo.vendor, 8);
5252 5254 bcopy(psd->sd_inq->inq_pid, ld->prop.prodInfo.product, 16);
5253 5255 bcopy(psd->sd_inq->inq_revision, ld->prop.prodInfo.revision, 4);
5254 5256 } else {
5255 5257 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_pathinfo_online: "
5256 5258 "mpapi_lu_data_t is NULL"));
5257 5259 }
5258 5260
5259 5261 /* create kstats for path */
5260 5262 vhci_kstat_create_pathinfo(pip);
5261 5263
5262 5264 done:
5263 5265 mutex_enter(&vhci_global_mutex);
5264 5266 cv_broadcast(&vhci_cv);
5265 5267 mutex_exit(&vhci_global_mutex);
5266 5268
5267 5269 if (vlun->svl_setcap_done) {
5268 5270 (void) vhci_pHCI_cap(ap, "sector-size",
5269 5271 vlun->svl_sector_size, 1, pip);
5270 5272 }
5271 5273
5272 5274 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_path_online: path:%p\n",
5273 5275 (void *)pip));
5274 5276
5275 5277 failure:
5276 5278 return (rval);
5277 5279 }
5278 5280
5279 5281 /*
5280 5282  * Path offline handler. Release all bindings that will not be
5281 5283  * released by the normal packet transport/completion code path.
5282 5284  * Since we don't (presently) keep any bindings alive outside of
5283 5285  * the in-transport packets (which will be released on completion),
5284 5286  * there is not much to do here.
5285 5287  */
5286 5288 /* ARGSUSED */
5287 5289 static int
5288 5290 vhci_pathinfo_offline(dev_info_t *vdip, mdi_pathinfo_t *pip, int flags)
5289 5291 {
5290 5292 scsi_hba_tran_t *hba = NULL;
5291 5293 struct scsi_device *psd = NULL;
5292 5294 dev_info_t *pdip = NULL;
5293 5295 dev_info_t *cdip = NULL;
5294 5296 scsi_vhci_priv_t *svp = NULL;
5295 5297
5296 5298 ASSERT(vdip != NULL);
5297 5299 ASSERT(pip != NULL);
5298 5300
5299 5301 pdip = mdi_pi_get_phci(pip);
5300 5302 ASSERT(pdip != NULL);
5301 5303 if (pdip == NULL) {
5302 5304 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5303 5305 "phci dip", (void *)pip));
5304 5306 return (MDI_FAILURE);
5305 5307 }
5306 5308
5307 5309 cdip = mdi_pi_get_client(pip);
5308 5310 ASSERT(cdip != NULL);
5309 5311 if (cdip == NULL) {
5310 5312 VHCI_DEBUG(1, (CE_WARN, vdip, "Invalid path 0x%p: NULL "
5311 5313 "client dip", (void *)pip));
5312 5314 return (MDI_FAILURE);
5313 5315 }
5314 5316
5315 5317 hba = ddi_get_driver_private(pdip);
5316 5318 ASSERT(hba != NULL);
5317 5319
5318 5320 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
5319 5321 if (svp == NULL) {
5320 5322 		/*
5321 5323 		 * An mdi_pathinfo node in the INIT state can have its vHCI
5322 5324 		 * private information set to NULL.
5323 5325 		 */
5324 5326 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5325 5327 "svp is NULL for pip 0x%p\n", (void *)pip));
5326 5328 return (MDI_SUCCESS);
5327 5329 }
5328 5330
5329 5331 psd = svp->svp_psd;
5330 5332 ASSERT(psd != NULL);
5331 5333
5332 5334 mutex_enter(&svp->svp_mutex);
5333 5335
5334 5336 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5335 5337 "%d cmds pending on path: 0x%p\n", svp->svp_cmds, (void *)pip));
5336 5338 while (svp->svp_cmds != 0) {
5337 5339 if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
5338 5340 drv_usectohz(vhci_path_quiesce_timeout * 1000000),
5339 5341 TR_CLOCK_TICK) == -1) {
5340 5342 /*
5341 5343 * The timeout time reached without the condition
5342 5344 * being signaled.
5343 5345 */
5344 5346 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5345 5347 "Timeout reached on path 0x%p without the cond\n",
5346 5348 (void *)pip));
5347 5349 VHCI_DEBUG(1, (CE_NOTE, vdip, "!vhci_pathinfo_offline: "
5348 5350 "%d cmds still pending on path: 0x%p\n",
5349 5351 svp->svp_cmds, (void *)pip));
5350 5352 break;
5351 5353 }
5352 5354 }
5353 5355 mutex_exit(&svp->svp_mutex);
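	/*
	 * The loop above is the usual drain-wait idiom; its general
	 * form (placeholder names, not driver symbols) is:
	 *
	 *	mutex_enter(&lock);
	 *	while (pending != 0) {
	 *		if (cv_reltimedwait(&cv, &lock,
	 *		    drv_usectohz(timeout_usec),
	 *		    TR_CLOCK_TICK) == -1)
	 *			break;		(timed out)
	 *	}
	 *	mutex_exit(&lock);
	 */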
5354 5356
5355 5357 	/*
5356 5358 	 * Check to see if this vlun has an active SCSI-II RESERVE and this
5357 5359 	 * is the pip for the path that has been reserved.
5358 5360 	 * If so, clear the reservation by sending a reset, so the host will
5359 5361 	 * not get a reservation conflict. Clear the VLUN_RESERVE_ACTIVE_FLG
5360 5362 	 * flag for this lun. A reset notify is also sent to the target driver
5361 5363 	 * just in case the POR check condition is cleared by some other layer
5362 5364 	 * in the stack.
5363 5365 	 */
5364 5366 if (svp->svp_svl->svl_flags & VLUN_RESERVE_ACTIVE_FLG) {
5365 5367 if (pip == svp->svp_svl->svl_resrv_pip) {
5366 5368 if (vhci_recovery_reset(svp->svp_svl,
5367 5369 &svp->svp_psd->sd_address, TRUE,
5368 5370 VHCI_DEPTH_TARGET) == 0) {
5369 5371 				    "!vhci_pathinfo_offline (pip:%p): "
5370 5372 "!vhci_pathinfo_offline (pip:%p):"
5371 5373 "reset failed, retrying\n", (void *)pip));
5372 5374 delay(1*drv_usectohz(1000000));
5373 5375 if (vhci_recovery_reset(svp->svp_svl,
5374 5376 &svp->svp_psd->sd_address, TRUE,
5375 5377 VHCI_DEPTH_TARGET) == 0) {
5376 5378 VHCI_DEBUG(1, (CE_NOTE, NULL,
5377 5379 "!vhci_pathinfo_offline "
5378 5380 "(pip:%p): reset failed, "
5379 5381 "giving up!\n", (void *)pip));
5380 5382 }
5381 5383 }
5382 5384 svp->svp_svl->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
5383 5385 }
5384 5386 }
5385 5387
5386 5388 mdi_pi_set_state(pip, MDI_PATHINFO_STATE_OFFLINE);
5387 5389 vhci_mpapi_set_path_state(vdip, pip, MP_DRVR_PATH_STATE_REMOVED);
5388 5390
5389 5391 VHCI_DEBUG(1, (CE_NOTE, NULL,
5390 5392 "!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip));
5391 5393 return (MDI_SUCCESS);
5392 5394 }
5393 5395
5394 5396
5395 5397 /*
5396 5398  * Routine for the SCSI VHCI IOCTL implementation.
5397 5399 */
5398 5400 /* ARGSUSED */
5399 5401 static int
5400 5402 vhci_ctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
5401 5403 {
5402 5404 struct scsi_vhci *vhci;
5403 5405 dev_info_t *vdip;
5404 5406 mdi_pathinfo_t *pip;
5405 5407 int instance, held;
5406 5408 int retval = 0;
5407 5409 caddr_t phci_path = NULL, client_path = NULL;
5408 5410 caddr_t paddr = NULL;
5409 5411 sv_iocdata_t ioc;
5410 5412 sv_iocdata_t *pioc = &ioc;
5411 5413 sv_switch_to_cntlr_iocdata_t iocsc;
5412 5414 sv_switch_to_cntlr_iocdata_t *piocsc = &iocsc;
5413 5415 caddr_t s;
5414 5416 scsi_vhci_lun_t *vlun;
5415 5417 struct scsi_failover_ops *fo;
5416 5418 char *pclass;
5417 5419
5418 5420 /* Check for validity of vhci structure */
5419 5421 vhci = ddi_get_soft_state(vhci_softstate, MINOR2INST(getminor(dev)));
5420 5422 if (vhci == NULL) {
5421 5423 return (ENXIO);
5422 5424 }
5423 5425
5424 5426 mutex_enter(&vhci->vhci_mutex);
5425 5427 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
5426 5428 mutex_exit(&vhci->vhci_mutex);
5427 5429 return (ENXIO);
5428 5430 }
5429 5431 mutex_exit(&vhci->vhci_mutex);
5430 5432
5431 5433 /* Get the vhci dip */
5432 5434 vdip = vhci->vhci_dip;
5433 5435 ASSERT(vdip != NULL);
5434 5436 instance = ddi_get_instance(vdip);
5435 5437
5436 5438 /* Allocate memory for getting parameters from userland */
5437 5439 phci_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5438 5440 client_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
5439 5441 paddr = kmem_zalloc(MAXNAMELEN, KM_SLEEP);
5440 5442
5441 5443 /*
5442 5444 * Set a local variable indicating the ioctl name. Used for
5443 5445 * printing debug strings.
5444 5446 */
5445 5447 switch (cmd) {
5446 5448 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5447 5449 s = "GET_CLIENT_MULTIPATH_INFO";
5448 5450 break;
5449 5451
5450 5452 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5451 5453 s = "GET_PHCI_MULTIPATH_INFO";
5452 5454 break;
5453 5455
5454 5456 case SCSI_VHCI_GET_CLIENT_NAME:
5455 5457 s = "GET_CLIENT_NAME";
5456 5458 break;
5457 5459
5458 5460 case SCSI_VHCI_PATH_ONLINE:
5459 5461 s = "PATH_ONLINE";
5460 5462 break;
5461 5463
5462 5464 case SCSI_VHCI_PATH_OFFLINE:
5463 5465 s = "PATH_OFFLINE";
5464 5466 break;
5465 5467
5466 5468 case SCSI_VHCI_PATH_STANDBY:
5467 5469 s = "PATH_STANDBY";
5468 5470 break;
5469 5471
5470 5472 case SCSI_VHCI_PATH_TEST:
5471 5473 s = "PATH_TEST";
5472 5474 break;
5473 5475
5474 5476 case SCSI_VHCI_SWITCH_TO_CNTLR:
5475 5477 s = "SWITCH_TO_CNTLR";
5476 5478 break;
5477 5479 case SCSI_VHCI_PATH_DISABLE:
5478 5480 s = "PATH_DISABLE";
5479 5481 break;
5480 5482 case SCSI_VHCI_PATH_ENABLE:
5481 5483 s = "PATH_ENABLE";
5482 5484 break;
5483 5485
5484 5486 case SCSI_VHCI_GET_TARGET_LONGNAME:
5485 5487 s = "GET_TARGET_LONGNAME";
5486 5488 break;
5487 5489
5488 5490 #ifdef DEBUG
5489 5491 case SCSI_VHCI_CONFIGURE_PHCI:
5490 5492 s = "CONFIGURE_PHCI";
5491 5493 break;
5492 5494
5493 5495 case SCSI_VHCI_UNCONFIGURE_PHCI:
5494 5496 s = "UNCONFIGURE_PHCI";
5495 5497 break;
5496 5498 #endif
5497 5499
5498 5500 default:
5499 5501 s = "Unknown";
5500 5502 vhci_log(CE_NOTE, vdip,
5501 5503 "!vhci%d: ioctl %x (unsupported ioctl)", instance, cmd);
5502 5504 retval = ENOTSUP;
5503 5505 break;
5504 5506 }
5505 5507 if (retval != 0) {
5506 5508 goto end;
5507 5509 }
5508 5510
5509 5511 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci%d: ioctl <%s>", instance, s));
5510 5512
5511 5513 /*
5512 5514 * Get IOCTL parameters from userland
5513 5515 */
5514 5516 switch (cmd) {
5515 5517 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5516 5518 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5517 5519 case SCSI_VHCI_GET_CLIENT_NAME:
5518 5520 case SCSI_VHCI_PATH_ONLINE:
5519 5521 case SCSI_VHCI_PATH_OFFLINE:
5520 5522 case SCSI_VHCI_PATH_STANDBY:
5521 5523 case SCSI_VHCI_PATH_TEST:
5522 5524 case SCSI_VHCI_PATH_DISABLE:
5523 5525 case SCSI_VHCI_PATH_ENABLE:
5524 5526 case SCSI_VHCI_GET_TARGET_LONGNAME:
5525 5527 #ifdef DEBUG
5526 5528 case SCSI_VHCI_CONFIGURE_PHCI:
5527 5529 case SCSI_VHCI_UNCONFIGURE_PHCI:
5528 5530 #endif
5529 5531 retval = vhci_get_iocdata((const void *)data, pioc, mode, s);
5530 5532 break;
5531 5533
5532 5534 case SCSI_VHCI_SWITCH_TO_CNTLR:
5533 5535 retval = vhci_get_iocswitchdata((const void *)data, piocsc,
5534 5536 mode, s);
5535 5537 break;
5536 5538 }
5537 5539 if (retval != 0) {
5538 5540 goto end;
5539 5541 }
5540 5542
5541 5543
5542 5544 /*
5543 5545 * Process the IOCTL
5544 5546 */
5545 5547 switch (cmd) {
5546 5548 case SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO:
5547 5549 {
5548 5550 uint_t num_paths; /* Num paths to client dev */
5549 5551 sv_path_info_t *upibuf = NULL; /* To keep userland values */
5550 5552 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */
5551 5553 dev_info_t *cdip; /* Client device dip */
5552 5554
5553 5555 if (pioc->ret_elem == NULL) {
5554 5556 retval = EINVAL;
5555 5557 break;
5556 5558 }
5557 5559
5558 5560 /* Get client device path from user land */
5559 5561 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5560 5562 retval = EFAULT;
5561 5563 break;
5562 5564 }
5563 5565
5564 5566 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5565 5567 "client <%s>", s, client_path));
5566 5568
5567 5569 /* Get number of paths to this client device */
5568 5570 if ((cdip = mdi_client_path2devinfo(vdip, client_path))
5569 5571 == NULL) {
5570 5572 retval = ENXIO;
5571 5573 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5572 5574 "client dip doesn't exist. invalid path <%s>",
5573 5575 s, client_path));
5574 5576 break;
5575 5577 }
5576 5578 num_paths = mdi_client_get_path_count(cdip);
5577 5579
5578 5580 if (ddi_copyout(&num_paths, pioc->ret_elem,
5579 5581 sizeof (num_paths), mode)) {
5580 5582 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5581 5583 "num_paths copyout failed", s));
5582 5584 retval = EFAULT;
5583 5585 break;
5584 5586 }
5585 5587
5586 5588 /* If user just wanted num_paths, then return */
5587 5589 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5588 5590 num_paths == 0) {
5589 5591 break;
5590 5592 }
5591 5593
5592 5594 		/* Clamp num_paths to what can be sent to userland */
5593 5595 if (num_paths > pioc->buf_elem) {
5594 5596 num_paths = pioc->buf_elem;
5595 5597 }
5596 5598
5597 5599 /* Allocate memory and get userland pointers */
5598 5600 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5599 5601 pioc, mode, s) != 0) {
5600 5602 retval = EFAULT;
5601 5603 break;
5602 5604 }
5603 5605 ASSERT(upibuf != NULL);
5604 5606 ASSERT(kpibuf != NULL);
5605 5607
5606 5608 /*
5607 5609 * Get the path information and send it to userland.
5608 5610 */
5609 5611 if (vhci_get_client_path_list(cdip, kpibuf, num_paths)
5610 5612 != MDI_SUCCESS) {
5611 5613 retval = ENXIO;
5612 5614 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5613 5615 break;
5614 5616 }
5615 5617
5616 5618 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5617 5619 pioc, mode, s)) {
5618 5620 retval = EFAULT;
5619 5621 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5620 5622 break;
5621 5623 }
5622 5624
5623 5625 /* Free the memory allocated for path information */
5624 5626 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5625 5627 break;
5626 5628 }
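	/*
	 * Userland usage sketch for the ioctl above (hypothetical
	 * device path; error handling omitted; fd is assumed open on
	 * the scsi_vhci devctl node). The client buffer must be
	 * MAXPATHLEN bytes, since the driver copies in that much:
	 *
	 *	sv_iocdata_t ioc;
	 *	char client[MAXPATHLEN];
	 *	uint_t nelem;
	 *
	 *	(void) strlcpy(client, "/devices/...", sizeof (client));
	 *	bzero(&ioc, sizeof (ioc));
	 *	ioc.client = client;
	 *	ioc.ret_elem = &nelem;
	 *	if (ioctl(fd, SCSI_VHCI_GET_CLIENT_MULTIPATH_INFO,
	 *	    &ioc) == 0)
	 *		nelem now holds the path count
	 */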
5627 5629
5628 5630 case SCSI_VHCI_GET_PHCI_MULTIPATH_INFO:
5629 5631 {
5630 5632 uint_t num_paths; /* Num paths to client dev */
5631 5633 sv_path_info_t *upibuf = NULL; /* To keep userland values */
5632 5634 sv_path_info_t *kpibuf = NULL; /* Kernel data for ioctls */
5633 5635 dev_info_t *pdip; /* PHCI device dip */
5634 5636
5635 5637 if (pioc->ret_elem == NULL) {
5636 5638 retval = EINVAL;
5637 5639 break;
5638 5640 }
5639 5641
5640 5642 /* Get PHCI device path from user land */
5641 5643 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5642 5644 retval = EFAULT;
5643 5645 break;
5644 5646 }
5645 5647
5646 5648 VHCI_DEBUG(6, (CE_WARN, vdip,
5647 5649 "!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path));
5648 5650
5649 5651 /* Get number of devices associated with this PHCI device */
5650 5652 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5651 5653 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5652 5654 "phci dip doesn't exist. invalid path <%s>",
5653 5655 s, phci_path));
5654 5656 retval = ENXIO;
5655 5657 break;
5656 5658 }
5657 5659
5658 5660 num_paths = mdi_phci_get_path_count(pdip);
5659 5661
5660 5662 if (ddi_copyout(&num_paths, pioc->ret_elem,
5661 5663 sizeof (num_paths), mode)) {
5662 5664 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5663 5665 "num_paths copyout failed", s));
5664 5666 retval = EFAULT;
5665 5667 break;
5666 5668 }
5667 5669
5668 5670 /* If user just wanted num_paths, then return */
5669 5671 if (pioc->buf_elem == 0 || pioc->ret_buf == NULL ||
5670 5672 num_paths == 0) {
5671 5673 break;
5672 5674 }
5673 5675
5674 5676 		/* Clamp num_paths to what can be sent to userland */
5675 5677 if (num_paths > pioc->buf_elem) {
5676 5678 num_paths = pioc->buf_elem;
5677 5679 }
5678 5680
5679 5681 /* Allocate memory and get userland pointers */
5680 5682 if (vhci_ioc_alloc_pathinfo(&upibuf, &kpibuf, num_paths,
5681 5683 pioc, mode, s) != 0) {
5682 5684 retval = EFAULT;
5683 5685 break;
5684 5686 }
5685 5687 ASSERT(upibuf != NULL);
5686 5688 ASSERT(kpibuf != NULL);
5687 5689
5688 5690 /*
5689 5691 * Get the path information and send it to userland.
5690 5692 */
5691 5693 if (vhci_get_phci_path_list(pdip, kpibuf, num_paths)
5692 5694 != MDI_SUCCESS) {
5693 5695 retval = ENXIO;
5694 5696 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5695 5697 break;
5696 5698 }
5697 5699
5698 5700 if (vhci_ioc_send_pathinfo(upibuf, kpibuf, num_paths,
5699 5701 pioc, mode, s)) {
5700 5702 retval = EFAULT;
5701 5703 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5702 5704 break;
5703 5705 }
5704 5706
5705 5707 /* Free the memory allocated for path information */
5706 5708 vhci_ioc_free_pathinfo(upibuf, kpibuf, num_paths);
5707 5709 break;
5708 5710 }
5709 5711
5710 5712 case SCSI_VHCI_GET_CLIENT_NAME:
5711 5713 {
5712 5714 dev_info_t *cdip, *pdip;
5713 5715
5714 5716 /* Get PHCI path and device address from user land */
5715 5717 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5716 5718 vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5717 5719 retval = EFAULT;
5718 5720 break;
5719 5721 }
5720 5722
5721 5723 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5722 5724 "phci <%s>, paddr <%s>", s, phci_path, paddr));
5723 5725
5724 5726 /* Get the PHCI dip */
5725 5727 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5726 5728 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5727 5729 "phci dip doesn't exist. invalid path <%s>",
5728 5730 s, phci_path));
5729 5731 retval = ENXIO;
5730 5732 break;
5731 5733 }
5732 5734
5733 5735 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5734 5736 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5735 5737 "pathinfo doesn't exist. invalid device addr", s));
5736 5738 retval = ENXIO;
5737 5739 break;
5738 5740 }
5739 5741
5740 5742 /* Get the client device pathname and send to userland */
5741 5743 cdip = mdi_pi_get_client(pip);
5742 5744 vhci_ioc_devi_to_path(cdip, client_path);
5743 5745
5744 5746 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5745 5747 "client <%s>", s, client_path));
5746 5748
5747 5749 if (vhci_ioc_send_client_path(client_path, pioc, mode, s)) {
5748 5750 retval = EFAULT;
5749 5751 break;
5750 5752 }
5751 5753 break;
5752 5754 }
5753 5755
5754 5756 case SCSI_VHCI_PATH_ONLINE:
5755 5757 case SCSI_VHCI_PATH_OFFLINE:
5756 5758 case SCSI_VHCI_PATH_STANDBY:
5757 5759 case SCSI_VHCI_PATH_TEST:
5758 5760 {
5759 5761 dev_info_t *pdip; /* PHCI dip */
5760 5762
5761 5763 /* Get PHCI path and device address from user land */
5762 5764 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s) ||
5763 5765 vhci_ioc_get_paddr(pioc, paddr, mode, s)) {
5764 5766 retval = EFAULT;
5765 5767 break;
5766 5768 }
5767 5769
5768 5770 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5769 5771 "phci <%s>, paddr <%s>", s, phci_path, paddr));
5770 5772
5771 5773 /* Get the PHCI dip */
5772 5774 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5773 5775 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5774 5776 "phci dip doesn't exist. invalid path <%s>",
5775 5777 s, phci_path));
5776 5778 retval = ENXIO;
5777 5779 break;
5778 5780 }
5779 5781
5780 5782 if ((pip = mdi_pi_find(pdip, NULL, paddr)) == NULL) {
5781 5783 VHCI_DEBUG(1, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5782 5784 "pathinfo doesn't exist. invalid device addr", s));
5783 5785 retval = ENXIO;
5784 5786 break;
5785 5787 }
5786 5788
5787 5789 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5788 5790 "Calling MDI function to change device state", s));
5789 5791
5790 5792 switch (cmd) {
5791 5793 case SCSI_VHCI_PATH_ONLINE:
5792 5794 retval = mdi_pi_online(pip, 0);
5793 5795 break;
5794 5796
5795 5797 case SCSI_VHCI_PATH_OFFLINE:
5796 5798 retval = mdi_pi_offline(pip, 0);
5797 5799 break;
5798 5800
5799 5801 case SCSI_VHCI_PATH_STANDBY:
5800 5802 retval = mdi_pi_standby(pip, 0);
5801 5803 break;
5802 5804
5803 5805 case SCSI_VHCI_PATH_TEST:
5804 5806 break;
5805 5807 }
5806 5808 break;
5807 5809 }
5808 5810
5809 5811 case SCSI_VHCI_SWITCH_TO_CNTLR:
5810 5812 {
5811 5813 dev_info_t *cdip;
5812 5814 struct scsi_device *devp;
5813 5815
5814 5816 /* Get the client device pathname */
5815 5817 if (ddi_copyin(piocsc->client, client_path,
5816 5818 MAXPATHLEN, mode)) {
5817 5819 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5818 5820 "client_path copyin failed", s));
5819 5821 retval = EFAULT;
5820 5822 break;
5821 5823 }
5822 5824
5823 5825 /* Get the path class to which user wants to switch */
5824 5826 if (ddi_copyin(piocsc->class, paddr, MAXNAMELEN, mode)) {
5825 5827 VHCI_DEBUG(2, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
5826 5828 "controller_class copyin failed", s));
5827 5829 retval = EFAULT;
5828 5830 break;
5829 5831 }
5830 5832
5831 5833 /* Perform validity checks */
5832 5834 if ((cdip = mdi_client_path2devinfo(vdip,
5833 5835 client_path)) == NULL) {
5834 5836 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5835 5837 "client dip doesn't exist. invalid path <%s>",
5836 5838 s, client_path));
5837 5839 retval = ENXIO;
5838 5840 break;
5839 5841 }
5840 5842
5841 5843 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: Calling MDI func "
5842 5844 "to switch controller"));
5843 5845 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: client <%s> "
5844 5846 "class <%s>", client_path, paddr));
5845 5847
5846 5848 if (strcmp(paddr, PCLASS_PRIMARY) &&
5847 5849 strcmp(paddr, PCLASS_SECONDARY)) {
5848 5850 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5849 5851 "invalid path class <%s>", s, paddr));
5850 5852 retval = ENXIO;
5851 5853 break;
5852 5854 }
5853 5855
5854 5856 devp = ddi_get_driver_private(cdip);
5855 5857 if (devp == NULL) {
5856 5858 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5857 5859 "invalid scsi device <%s>", s, client_path));
5858 5860 retval = ENXIO;
5859 5861 break;
5860 5862 }
5861 5863 vlun = ADDR2VLUN(&devp->sd_address);
5862 5864 ASSERT(vlun);
5863 5865
5864 5866 		/*
5865 5867 		 * Check to see if the device has only one pclass, PRIMARY.
5866 5868 		 * If so, this device doesn't support failover. We assume
5867 5869 		 * that a device with one pclass uses PRIMARY, as that's the
5868 5870 		 * case today. If this is not true and other symmetric
5869 5871 		 * devices with a different pclass are supported in the
5870 5872 		 * future, this IOCTL will have to be overhauled anyway, as
5871 5873 		 * the only arguments it accepts are PRIMARY and SECONDARY.
5872 5874 		 */
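		/*
		 * Hence the probe below: if no path class follows
		 * PRIMARY, sfo_pathclass_next() fails and we return
		 * ENOTSUP.
		 */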
5873 5875 fo = vlun->svl_fops;
5874 5876 if (fo->sfo_pathclass_next(PCLASS_PRIMARY, &pclass,
5875 5877 vlun->svl_fops_ctpriv)) {
5876 5878 retval = ENOTSUP;
5877 5879 break;
5878 5880 }
5879 5881
5880 5882 VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
5881 5883 mutex_enter(&vlun->svl_mutex);
5882 5884 if (vlun->svl_active_pclass != NULL) {
5883 5885 if (strcmp(vlun->svl_active_pclass, paddr) == 0) {
5884 5886 mutex_exit(&vlun->svl_mutex);
5885 5887 retval = EALREADY;
5886 5888 VHCI_RELEASE_LUN(vlun);
5887 5889 break;
5888 5890 }
5889 5891 }
5890 5892 mutex_exit(&vlun->svl_mutex);
5891 5893 /* Call mdi function to cause a switch over */
5892 5894 retval = mdi_failover(vdip, cdip, MDI_FAILOVER_SYNC);
5893 5895 if (retval == MDI_SUCCESS) {
5894 5896 retval = 0;
5895 5897 } else if (retval == MDI_BUSY) {
5896 5898 retval = EBUSY;
5897 5899 } else {
5898 5900 retval = EIO;
5899 5901 }
5900 5902 VHCI_RELEASE_LUN(vlun);
5901 5903 break;
5902 5904 }
5903 5905
5904 5906 case SCSI_VHCI_PATH_ENABLE:
5905 5907 case SCSI_VHCI_PATH_DISABLE:
5906 5908 {
5907 5909 dev_info_t *cdip, *pdip;
5908 5910
5909 5911 /*
5910 5912 * Get client device path from user land
5911 5913 */
5912 5914 if (vhci_ioc_get_client_path(pioc, client_path, mode, s)) {
5913 5915 retval = EFAULT;
5914 5916 break;
5915 5917 }
5916 5918
5917 5919 /*
5918 5920 * Get Phci device path from user land
5919 5921 */
5920 5922 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
5921 5923 retval = EFAULT;
5922 5924 break;
5923 5925 }
5924 5926
5925 5927 /*
5926 5928 * Get the devinfo for the Phci.
5927 5929 */
5928 5930 if ((pdip = mdi_phci_path2devinfo(vdip, phci_path)) == NULL) {
5929 5931 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5930 5932 "phci dip doesn't exist. invalid path <%s>",
5931 5933 s, phci_path));
5932 5934 retval = ENXIO;
5933 5935 break;
5934 5936 }
5935 5937
5936 5938 /*
5937 5939 * If the client path is set to /scsi_vhci then we need
5938 5940 * to do the operation on all the clients so set cdip to NULL.
5939 5941 * Else, try to get the client dip.
5940 5942 */
5941 5943 if (strcmp(client_path, "/scsi_vhci") == 0) {
5942 5944 cdip = NULL;
5943 5945 } else {
5944 5946 if ((cdip = mdi_client_path2devinfo(vdip,
5945 5947 client_path)) == NULL) {
5946 5948 retval = ENXIO;
5947 5949 VHCI_DEBUG(1, (CE_WARN, NULL,
5948 5950 "!vhci_ioctl: ioctl <%s> client dip "
5949 5951 "doesn't exist. invalid path <%s>",
5950 5952 s, client_path));
5951 5953 break;
5952 5954 }
5953 5955 }
5954 5956
5955 5957 if (cmd == SCSI_VHCI_PATH_ENABLE)
5956 5958 retval = mdi_pi_enable(cdip, pdip, USER_DISABLE);
5957 5959 else
5958 5960 retval = mdi_pi_disable(cdip, pdip, USER_DISABLE);
5959 5961
5960 5962 break;
5961 5963 }
5962 5964
5963 5965 case SCSI_VHCI_GET_TARGET_LONGNAME:
5964 5966 {
5965 5967 uint_t pid = pioc->buf_elem;
5966 5968 char *target_port;
5967 5969 mod_hash_val_t hv;
5968 5970
5969 5971 /* targetmap lookup of 'target-port' by <pid> */
5970 5972 if (mod_hash_find(vhci_targetmap_bypid,
5971 5973 (mod_hash_key_t)(uintptr_t)pid, &hv) != 0) {
5972 5974 /*
5973 5975 * NOTE: failure to find the mapping is OK for guid
5974 5976 * based 'target-port' values.
5975 5977 */
5976 5978 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5977 5979 "targetport mapping doesn't exist: pid %d",
5978 5980 s, pid));
5979 5981 retval = ENXIO;
5980 5982 break;
5981 5983 }
5982 5984
5983 5985 /* copyout 'target-port' result */
5984 5986 target_port = (char *)hv;
5985 5987 if (copyoutstr(target_port, pioc->addr, MAXNAMELEN, NULL)) {
5986 5988 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
5987 5989 "targetport copyout failed: len: %d",
5988 5990 s, (int)strlen(target_port)));
5989 5991 retval = EFAULT;
5990 5992 }
5991 5993 break;
5992 5994 }
5993 5995
5994 5996 #ifdef DEBUG
5995 5997 case SCSI_VHCI_CONFIGURE_PHCI:
5996 5998 {
5997 5999 dev_info_t *pdip;
5998 6000
5999 6001 /* Get PHCI path and device address from user land */
6000 6002 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
6001 6003 retval = EFAULT;
6002 6004 break;
6003 6005 }
6004 6006
6005 6007 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
6006 6008 "phci <%s>", s, phci_path));
6007 6009
6008 6010 /* Get the PHCI dip */
6009 6011 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
6010 6012 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
6011 6013 "phci dip doesn't exist. invalid path <%s>",
6012 6014 s, phci_path));
6013 6015 retval = ENXIO;
6014 6016 break;
6015 6017 }
6016 6018
6017 6019 if (ndi_devi_config(pdip,
6018 6020 NDI_DEVFS_CLEAN|NDI_DEVI_PERSIST) != NDI_SUCCESS) {
6019 6021 retval = EIO;
6020 6022 }
6021 6023
6022 6024 ddi_release_devi(pdip);
6023 6025 break;
6024 6026 }
6025 6027
6026 6028 case SCSI_VHCI_UNCONFIGURE_PHCI:
6027 6029 {
6028 6030 dev_info_t *pdip;
6029 6031
6030 6032 /* Get PHCI path and device address from user land */
6031 6033 if (vhci_ioc_get_phci_path(pioc, phci_path, mode, s)) {
6032 6034 retval = EFAULT;
6033 6035 break;
6034 6036 }
6035 6037
6036 6038 VHCI_DEBUG(6, (CE_WARN, vdip, "!vhci_ioctl: ioctl <%s> "
6037 6039 "phci <%s>", s, phci_path));
6038 6040
6039 6041 /* Get the PHCI dip */
6040 6042 if ((pdip = e_ddi_hold_devi_by_path(phci_path, 0)) == NULL) {
6041 6043 VHCI_DEBUG(3, (CE_WARN, NULL, "!vhci_ioctl: ioctl <%s> "
6042 6044 "phci dip doesn't exist. invalid path <%s>",
6043 6045 s, phci_path));
6044 6046 retval = ENXIO;
6045 6047 break;
6046 6048 }
6047 6049
6048 6050 if (ndi_devi_unconfig(pdip,
6049 6051 NDI_DEVI_REMOVE|NDI_DEVFS_CLEAN) != NDI_SUCCESS) {
6050 6052 retval = EBUSY;
6051 6053 }
6052 6054
6053 6055 ddi_release_devi(pdip);
6054 6056 break;
6055 6057 }
6056 6058 #endif
6057 6059 }
6058 6060
6059 6061 end:
6060 6062 /* Free the memory allocated above */
6061 6063 if (phci_path != NULL) {
6062 6064 kmem_free(phci_path, MAXPATHLEN);
6063 6065 }
6064 6066 if (client_path != NULL) {
6065 6067 kmem_free(client_path, MAXPATHLEN);
6066 6068 }
6067 6069 if (paddr != NULL) {
6068 6070 kmem_free(paddr, MAXNAMELEN);
6069 6071 }
6070 6072 return (retval);
6071 6073 }
6072 6074
6073 6075 /*
6074 6076 * devctl IOCTL support for client device DR
6075 6077 */
6076 6078 /* ARGSUSED */
6077 6079 int
6078 6080 vhci_devctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
6079 6081 int *rvalp)
6080 6082 {
6081 6083 dev_info_t *self;
6082 6084 dev_info_t *child;
6083 6085 scsi_hba_tran_t *hba;
6084 6086 struct devctl_iocdata *dcp;
6085 6087 struct scsi_vhci *vhci;
6086 6088 int rv = 0;
6087 6089 int retval = 0;
6088 6090 scsi_vhci_priv_t *svp;
6089 6091 mdi_pathinfo_t *pip;
6090 6092
6091 6093 if ((vhci = ddi_get_soft_state(vhci_softstate,
6092 6094 MINOR2INST(getminor(dev)))) == NULL)
6093 6095 return (ENXIO);
6094 6096
6095 6097 /*
6096 6098 * check if :devctl minor device has been opened
6097 6099 */
6098 6100 mutex_enter(&vhci->vhci_mutex);
6099 6101 if ((vhci->vhci_state & VHCI_STATE_OPEN) == 0) {
6100 6102 mutex_exit(&vhci->vhci_mutex);
6101 6103 return (ENXIO);
6102 6104 }
6103 6105 mutex_exit(&vhci->vhci_mutex);
6104 6106
6105 6107 self = vhci->vhci_dip;
6106 6108 hba = ddi_get_driver_private(self);
6107 6109 if (hba == NULL)
6108 6110 return (ENXIO);
6109 6111
6110 6112 /*
6111 6113 * We can use the generic implementation for these ioctls
6112 6114 */
6113 6115 switch (cmd) {
6114 6116 case DEVCTL_DEVICE_GETSTATE:
6115 6117 case DEVCTL_DEVICE_ONLINE:
6116 6118 case DEVCTL_DEVICE_OFFLINE:
6117 6119 case DEVCTL_DEVICE_REMOVE:
6118 6120 case DEVCTL_BUS_GETSTATE:
6119 6121 return (ndi_devctl_ioctl(self, cmd, arg, mode, 0));
6120 6122 }
6121 6123
6122 6124 /*
6123 6125 * read devctl ioctl data
6124 6126 */
6125 6127 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
6126 6128 return (EFAULT);
6127 6129
6128 6130 switch (cmd) {
6129 6131
6130 6132 case DEVCTL_DEVICE_RESET:
6131 6133 /*
6132 6134 * lookup and hold child device
6133 6135 */
6134 6136 if ((child = ndi_devi_find(self, ndi_dc_getname(dcp),
6135 6137 ndi_dc_getaddr(dcp))) == NULL) {
6136 6138 rv = ENXIO;
6137 6139 break;
6138 6140 }
6139 6141 retval = mdi_select_path(child, NULL,
6140 6142 (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH),
6141 6143 NULL, &pip);
6142 6144 if ((retval != MDI_SUCCESS) || (pip == NULL)) {
6143 6145 			VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioctl: "
6144 6146 "Unable to get a path, dip 0x%p", (void *)child));
6145 6147 rv = ENXIO;
6146 6148 break;
6147 6149 }
6148 6150 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
6149 6151 if (vhci_recovery_reset(svp->svp_svl,
6150 6152 &svp->svp_psd->sd_address, TRUE,
6151 6153 VHCI_DEPTH_TARGET) == 0) {
6152 6154 VHCI_DEBUG(1, (CE_NOTE, NULL,
6153 6155 "!vhci_ioctl(pip:%p): "
6154 6156 "reset failed\n", (void *)pip));
6155 6157 rv = ENXIO;
6156 6158 }
6157 6159 mdi_rele_path(pip);
6158 6160 break;
6159 6161
6160 6162 case DEVCTL_BUS_QUIESCE:
6161 6163 case DEVCTL_BUS_UNQUIESCE:
6162 6164 case DEVCTL_BUS_RESET:
6163 6165 case DEVCTL_BUS_RESETALL:
6164 6166 #ifdef DEBUG
6165 6167 case DEVCTL_BUS_CONFIGURE:
6166 6168 case DEVCTL_BUS_UNCONFIGURE:
6167 6169 #endif
6168 6170 rv = ENOTSUP;
6169 6171 break;
6170 6172
6171 6173 default:
6172 6174 rv = ENOTTY;
6173 6175 } /* end of outer switch */
6174 6176
6175 6177 ndi_dc_freehdl(dcp);
6176 6178 return (rv);
6177 6179 }
6178 6180
6179 6181 /*
6180 6182 * Routine to get the PHCI pathname from ioctl structures in userland
6181 6183 */
6182 6184 /* ARGSUSED */
6183 6185 static int
6184 6186 vhci_ioc_get_phci_path(sv_iocdata_t *pioc, caddr_t phci_path,
6185 6187 int mode, caddr_t s)
6186 6188 {
6187 6189 int retval = 0;
6188 6190
6189 6191 if (ddi_copyin(pioc->phci, phci_path, MAXPATHLEN, mode)) {
6190 6192 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_phci: ioctl <%s> "
6191 6193 "phci_path copyin failed", s));
6192 6194 retval = EFAULT;
6193 6195 }
6194 6196 return (retval);
6195 6197
6196 6198 }
6197 6199
6198 6200
6199 6201 /*
6200 6202 * Routine to get the Client device pathname from ioctl structures in userland
6201 6203 */
6202 6204 /* ARGSUSED */
6203 6205 static int
6204 6206 vhci_ioc_get_client_path(sv_iocdata_t *pioc, caddr_t client_path,
6205 6207 int mode, caddr_t s)
6206 6208 {
6207 6209 int retval = 0;
6208 6210
6209 6211 if (ddi_copyin(pioc->client, client_path, MAXPATHLEN, mode)) {
6210 6212 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_client: "
6211 6213 "ioctl <%s> client_path copyin failed", s));
6212 6214 retval = EFAULT;
6213 6215 }
6214 6216 return (retval);
6215 6217 }
6216 6218
6217 6219
6218 6220 /*
6219 6221 * Routine to get physical device address from ioctl structure in userland
6220 6222 */
6221 6223 /* ARGSUSED */
6222 6224 static int
6223 6225 vhci_ioc_get_paddr(sv_iocdata_t *pioc, caddr_t paddr, int mode, caddr_t s)
6224 6226 {
6225 6227 int retval = 0;
6226 6228
6227 6229 if (ddi_copyin(pioc->addr, paddr, MAXNAMELEN, mode)) {
6228 6230 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_get_paddr: "
6229 6231 "ioctl <%s> device addr copyin failed", s));
6230 6232 retval = EFAULT;
6231 6233 }
6232 6234 return (retval);
6233 6235 }
6234 6236
6235 6237
6236 6238 /*
6237 6239 * Routine to send client device pathname to userland.
6238 6240 */
6239 6241 /* ARGSUSED */
6240 6242 static int
6241 6243 vhci_ioc_send_client_path(caddr_t client_path, sv_iocdata_t *pioc,
6242 6244 int mode, caddr_t s)
6243 6245 {
6244 6246 int retval = 0;
6245 6247
6246 6248 if (ddi_copyout(client_path, pioc->client, MAXPATHLEN, mode)) {
6247 6249 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_ioc_send_client: "
6248 6250 "ioctl <%s> client_path copyout failed", s));
6249 6251 retval = EFAULT;
6250 6252 }
6251 6253 return (retval);
6252 6254 }
6253 6255
6254 6256
6255 6257 /*
6256 6258  * Routine to translate a dev_info pointer (dip) to a device pathname.
6257 6259 */
6258 6260 static void
6259 6261 vhci_ioc_devi_to_path(dev_info_t *dip, caddr_t path)
6260 6262 {
6261 6263 (void) ddi_pathname(dip, path);
6262 6264 }
6263 6265
6264 6266
6265 6267 /*
6266 6268 * vhci_get_phci_path_list:
6267 6269 * get information about devices associated with a
6268 6270 * given PHCI device.
6269 6271 *
6270 6272 * Return Values:
6271 6273  *		MDI_SUCCESS (path information returned through pibuf)
6272 6274 */
6273 6275 int
6274 6276 vhci_get_phci_path_list(dev_info_t *pdip, sv_path_info_t *pibuf,
6275 6277 uint_t num_elems)
6276 6278 {
6277 6279 uint_t count, done;
6278 6280 mdi_pathinfo_t *pip;
6279 6281 sv_path_info_t *ret_pip;
6280 6282 int status;
6281 6283 size_t prop_size;
6282 6284 int circular;
6283 6285
6284 6286 	/*
6285 6287 	 * Walk the paths below the given PHCI and retrieve the path
6286 6288 	 * information for each one.
6287 6289 	 */
6288 6290
6289 6291 ret_pip = pibuf;
6290 6292 count = 0;
6291 6293
6292 6294 ndi_devi_enter(pdip, &circular);
6293 6295
6294 6296 done = (count >= num_elems);
6295 6297 pip = mdi_get_next_client_path(pdip, NULL);
6296 6298 while (pip && !done) {
6297 6299 mdi_pi_lock(pip);
6298 6300 (void) ddi_pathname(mdi_pi_get_phci(pip),
6299 6301 ret_pip->device.ret_phci);
6300 6302 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6301 6303 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6302 6304 &ret_pip->ret_ext_state);
6303 6305
6304 6306 status = mdi_prop_size(pip, &prop_size);
6305 6307 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6306 6308 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6307 6309 }
6308 6310
6309 6311 #ifdef DEBUG
6310 6312 if (status != MDI_SUCCESS) {
6311 6313 VHCI_DEBUG(2, (CE_WARN, NULL,
6312 6314 "!vhci_get_phci_path_list: "
6313 6315 "phci <%s>, prop size failure 0x%x",
6314 6316 ret_pip->device.ret_phci, status));
6315 6317 }
6316 6318 #endif /* DEBUG */
6317 6319
6318 6320
6319 6321 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6320 6322 prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6321 6323 status = mdi_prop_pack(pip,
6322 6324 &ret_pip->ret_prop.buf,
6323 6325 ret_pip->ret_prop.buf_size);
6324 6326
6325 6327 #ifdef DEBUG
6326 6328 if (status != MDI_SUCCESS) {
6327 6329 VHCI_DEBUG(2, (CE_WARN, NULL,
6328 6330 "!vhci_get_phci_path_list: "
6329 6331 "phci <%s>, prop pack failure 0x%x",
6330 6332 ret_pip->device.ret_phci, status));
6331 6333 }
6332 6334 #endif /* DEBUG */
6333 6335 }
6334 6336
6335 6337 mdi_pi_unlock(pip);
6336 6338 pip = mdi_get_next_client_path(pdip, pip);
6337 6339 ret_pip++;
6338 6340 count++;
6339 6341 done = (count >= num_elems);
6340 6342 }
6341 6343
6342 6344 ndi_devi_exit(pdip, circular);
6343 6345
6344 6346 return (MDI_SUCCESS);
6345 6347 }
6346 6348
6347 6349
6348 6350 /*
6349 6351 * vhci_get_client_path_list:
6350 6352 * get information about various paths associated with a
6351 6353 * given client device.
6352 6354 *
6353 6355 * Return Values:
6354 6356  *		MDI_SUCCESS (path information returned through pibuf)
6355 6357 */
6356 6358 int
6357 6359 vhci_get_client_path_list(dev_info_t *cdip, sv_path_info_t *pibuf,
6358 6360 uint_t num_elems)
6359 6361 {
6360 6362 uint_t count, done;
6361 6363 mdi_pathinfo_t *pip;
6362 6364 sv_path_info_t *ret_pip;
6363 6365 int status;
6364 6366 size_t prop_size;
6365 6367 int circular;
6366 6368
6367 6369 ret_pip = pibuf;
6368 6370 count = 0;
6369 6371
6370 6372 ndi_devi_enter(cdip, &circular);
6371 6373
6372 6374 done = (count >= num_elems);
6373 6375 pip = mdi_get_next_phci_path(cdip, NULL);
6374 6376 while (pip && !done) {
6375 6377 mdi_pi_lock(pip);
6376 6378 (void) ddi_pathname(mdi_pi_get_phci(pip),
6377 6379 ret_pip->device.ret_phci);
6378 6380 (void) strcpy(ret_pip->ret_addr, mdi_pi_get_addr(pip));
6379 6381 (void) mdi_pi_get_state2(pip, &ret_pip->ret_state,
6380 6382 &ret_pip->ret_ext_state);
6381 6383
6382 6384 status = mdi_prop_size(pip, &prop_size);
6383 6385 if (status == MDI_SUCCESS && ret_pip->ret_prop.ret_buf_size) {
6384 6386 *ret_pip->ret_prop.ret_buf_size = (uint_t)prop_size;
6385 6387 }
6386 6388
6387 6389 #ifdef DEBUG
6388 6390 if (status != MDI_SUCCESS) {
6389 6391 VHCI_DEBUG(2, (CE_WARN, NULL,
6390 6392 "!vhci_get_client_path_list: "
6391 6393 "phci <%s>, prop size failure 0x%x",
6392 6394 ret_pip->device.ret_phci, status));
6393 6395 }
6394 6396 #endif /* DEBUG */
6395 6397
6396 6398
6397 6399 if (status == MDI_SUCCESS && ret_pip->ret_prop.buf &&
6398 6400 prop_size && ret_pip->ret_prop.buf_size >= prop_size) {
6399 6401 status = mdi_prop_pack(pip,
6400 6402 &ret_pip->ret_prop.buf,
6401 6403 ret_pip->ret_prop.buf_size);
6402 6404
6403 6405 #ifdef DEBUG
6404 6406 if (status != MDI_SUCCESS) {
6405 6407 VHCI_DEBUG(2, (CE_WARN, NULL,
6406 6408 "!vhci_get_client_path_list: "
6407 6409 "phci <%s>, prop pack failure 0x%x",
6408 6410 ret_pip->device.ret_phci, status));
6409 6411 }
6410 6412 #endif /* DEBUG */
6411 6413 }
6412 6414
6413 6415 mdi_pi_unlock(pip);
6414 6416 pip = mdi_get_next_phci_path(cdip, pip);
6415 6417 ret_pip++;
6416 6418 count++;
6417 6419 done = (count >= num_elems);
6418 6420 }
6419 6421
6420 6422 ndi_devi_exit(cdip, circular);
6421 6423
6422 6424 return (MDI_SUCCESS);
6423 6425 }
6424 6426
6425 6427
6426 6428 /*
6427 6429 * Routine to get ioctl argument structure from userland.
6428 6430 */
6429 6431 /* ARGSUSED */
6430 6432 static int
6431 6433 vhci_get_iocdata(const void *data, sv_iocdata_t *pioc, int mode, caddr_t s)
6432 6434 {
6433 6435 int retval = 0;
6434 6436
6435 6437 #ifdef _MULTI_DATAMODEL
6436 6438 switch (ddi_model_convert_from(mode & FMODELS)) {
6437 6439 case DDI_MODEL_ILP32:
6438 6440 {
6439 6441 sv_iocdata32_t ioc32;
6440 6442
6441 6443 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6442 6444 retval = EFAULT;
6443 6445 break;
6444 6446 }
6445 6447 pioc->client = (caddr_t)(uintptr_t)ioc32.client;
6446 6448 pioc->phci = (caddr_t)(uintptr_t)ioc32.phci;
6447 6449 pioc->addr = (caddr_t)(uintptr_t)ioc32.addr;
6448 6450 pioc->buf_elem = (uint_t)ioc32.buf_elem;
6449 6451 pioc->ret_buf = (sv_path_info_t *)(uintptr_t)ioc32.ret_buf;
6450 6452 pioc->ret_elem = (uint_t *)(uintptr_t)ioc32.ret_elem;
6451 6453 break;
6452 6454 }
6453 6455
6454 6456 case DDI_MODEL_NONE:
6455 6457 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6456 6458 retval = EFAULT;
6457 6459 break;
6458 6460 }
6459 6461 break;
6460 6462 }
6461 6463 #else /* _MULTI_DATAMODEL */
6462 6464 if (ddi_copyin(data, pioc, sizeof (*pioc), mode)) {
6463 6465 retval = EFAULT;
6464 6466 }
6465 6467 #endif /* _MULTI_DATAMODEL */
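	/*
	 * Note on the ILP32 case above: sv_iocdata32_t carries the
	 * user addresses as 32-bit integers, so each one is widened
	 * through (uintptr_t) before being stored in the native
	 * sv_iocdata_t.
	 */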
6466 6468
6467 6469 #ifdef DEBUG
6468 6470 if (retval) {
6469 6471 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6470 6472 "iocdata copyin failed", s));
6471 6473 }
6472 6474 #endif
6473 6475
6474 6476 return (retval);
6475 6477 }
6476 6478
6477 6479
6478 6480 /*
6479 6481 * Routine to get the ioctl argument for ioctl causing controller switchover.
6480 6482 */
6481 6483 /* ARGSUSED */
6482 6484 static int
6483 6485 vhci_get_iocswitchdata(const void *data, sv_switch_to_cntlr_iocdata_t *piocsc,
6484 6486 int mode, caddr_t s)
6485 6487 {
6486 6488 int retval = 0;
6487 6489
6488 6490 #ifdef _MULTI_DATAMODEL
6489 6491 switch (ddi_model_convert_from(mode & FMODELS)) {
6490 6492 case DDI_MODEL_ILP32:
6491 6493 {
6492 6494 sv_switch_to_cntlr_iocdata32_t ioc32;
6493 6495
6494 6496 if (ddi_copyin(data, &ioc32, sizeof (ioc32), mode)) {
6495 6497 retval = EFAULT;
6496 6498 break;
6497 6499 }
6498 6500 piocsc->client = (caddr_t)(uintptr_t)ioc32.client;
6499 6501 piocsc->class = (caddr_t)(uintptr_t)ioc32.class;
6500 6502 break;
6501 6503 }
6502 6504
6503 6505 case DDI_MODEL_NONE:
6504 6506 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6505 6507 retval = EFAULT;
6506 6508 }
6507 6509 break;
6508 6510 }
6509 6511 #else /* _MULTI_DATAMODEL */
6510 6512 if (ddi_copyin(data, piocsc, sizeof (*piocsc), mode)) {
6511 6513 retval = EFAULT;
6512 6514 }
6513 6515 #endif /* _MULTI_DATAMODEL */
6514 6516
6515 6517 #ifdef DEBUG
6516 6518 if (retval) {
6517 6519 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: cmd <%s> "
6518 6520 "switch_to_cntlr_iocdata copyin failed", s));
6519 6521 }
6520 6522 #endif
6521 6523
6522 6524 return (retval);
6523 6525 }
6524 6526
6525 6527
6526 6528 /*
6527 6529 * Routine to allocate memory for the path information structures.
6528 6530 * It allocates two chunks of memory - one for keeping userland
6529 6531 * pointers/values for path information and path properties, second for
6530 6532 * keeping allocating kernel memory for path properties. These path
6531 6533 * properties are finally copied to userland.
6532 6534 */
6533 6535 /* ARGSUSED */
6534 6536 static int
6535 6537 vhci_ioc_alloc_pathinfo(sv_path_info_t **upibuf, sv_path_info_t **kpibuf,
6536 6538 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6537 6539 {
6538 6540 sv_path_info_t *pi;
6539 6541 uint_t bufsize;
6540 6542 int retval = 0;
6541 6543 int index;
6542 6544
6543 6545 /* Allocate memory */
6544 6546 *upibuf = (sv_path_info_t *)
6545 6547 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6546 6548 ASSERT(*upibuf != NULL);
6547 6549 *kpibuf = (sv_path_info_t *)
6548 6550 kmem_zalloc(sizeof (sv_path_info_t) * num_paths, KM_SLEEP);
6549 6551 ASSERT(*kpibuf != NULL);
6550 6552
6551 6553 /*
6552 6554 * Get the path info structure from the user space.
6553 6555 * We are interested in the following fields:
6554 6556 * - user size of buffer for per path properties.
6555 6557 * - user address of buffer for path info properties.
6556 6558 * - user pointer for returning actual buffer size
6557 6559 * Keep these fields in the 'upibuf' structures.
6558 6560 * Allocate buffer for per path info properties in kernel
6559 6561 * structure ('kpibuf').
6560 6562 * Size of these buffers will be equal to the size of buffers
6561 6563 * in the user space.
6562 6564 */
6563 6565 #ifdef _MULTI_DATAMODEL
6564 6566 switch (ddi_model_convert_from(mode & FMODELS)) {
6565 6567 case DDI_MODEL_ILP32:
6566 6568 {
6567 6569 sv_path_info32_t *src;
6568 6570 sv_path_info32_t pi32;
6569 6571
6570 6572 src = (sv_path_info32_t *)pioc->ret_buf;
6571 6573 pi = (sv_path_info_t *)*upibuf;
6572 6574 for (index = 0; index < num_paths; index++, src++, pi++) {
6573 6575 if (ddi_copyin(src, &pi32, sizeof (pi32), mode)) {
6574 6576 retval = EFAULT;
6575 6577 break;
6576 6578 }
6577 6579
6578 6580 pi->ret_prop.buf_size =
6579 6581 (uint_t)pi32.ret_prop.buf_size;
6580 6582 pi->ret_prop.ret_buf_size =
6581 6583 (uint_t *)(uintptr_t)pi32.ret_prop.ret_buf_size;
6582 6584 pi->ret_prop.buf =
6583 6585 (caddr_t)(uintptr_t)pi32.ret_prop.buf;
6584 6586 }
6585 6587 break;
6586 6588 }
6587 6589
6588 6590 case DDI_MODEL_NONE:
6589 6591 if (ddi_copyin(pioc->ret_buf, *upibuf,
6590 6592 sizeof (sv_path_info_t) * num_paths, mode)) {
6591 6593 retval = EFAULT;
6592 6594 }
6593 6595 break;
6594 6596 }
6595 6597 #else /* _MULTI_DATAMODEL */
6596 6598 if (ddi_copyin(pioc->ret_buf, *upibuf,
6597 6599 sizeof (sv_path_info_t) * num_paths, mode)) {
6598 6600 retval = EFAULT;
6599 6601 }
6600 6602 #endif /* _MULTI_DATAMODEL */
6601 6603
6602 6604 if (retval != 0) {
6603 6605 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_alloc_path_info: "
6604 6606 "ioctl <%s> normal: path_info copyin failed", s));
6605 6607 kmem_free(*upibuf, sizeof (sv_path_info_t) * num_paths);
6606 6608 kmem_free(*kpibuf, sizeof (sv_path_info_t) * num_paths);
6607 6609 *upibuf = NULL;
6608 6610 *kpibuf = NULL;
6609 6611 return (retval);
6610 6612 }
6611 6613
6612 6614 /*
6613 6615 * Allocate memory for per path properties.
6614 6616 */
6615 6617 for (index = 0, pi = *kpibuf; index < num_paths; index++, pi++) {
6616 6618 bufsize = (*upibuf)[index].ret_prop.buf_size;
6617 6619
6618 6620 if (bufsize && bufsize <= SV_PROP_MAX_BUF_SIZE) {
6619 6621 pi->ret_prop.buf_size = bufsize;
6620 6622 pi->ret_prop.buf = (caddr_t)
6621 6623 kmem_zalloc(bufsize, KM_SLEEP);
6622 6624 ASSERT(pi->ret_prop.buf != NULL);
6623 6625 } else {
6624 6626 pi->ret_prop.buf_size = 0;
6625 6627 pi->ret_prop.buf = NULL;
6626 6628 }
6627 6629
6628 6630 if ((*upibuf)[index].ret_prop.ret_buf_size != NULL) {
6629 6631 pi->ret_prop.ret_buf_size = (uint_t *)kmem_zalloc(
6630 6632 sizeof (*pi->ret_prop.ret_buf_size), KM_SLEEP);
6631 6633 ASSERT(pi->ret_prop.ret_buf_size != NULL);
6632 6634 } else {
6633 6635 pi->ret_prop.ret_buf_size = NULL;
6634 6636 }
6635 6637 }
6636 6638
6637 6639 return (0);
6638 6640 }
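
/*
 * Editor's note: a compilable user-level sketch (hypothetical names) of
 * the size-capped mirror allocation above: a kernel-side property
 * buffer is allocated only when the caller supplied a non-zero size
 * within the allowed maximum, mirroring the SV_PROP_MAX_BUF_SIZE check.
 */
#include <stdlib.h>

#define PROP_MAX_SKETCH	4096	/* stand-in for SV_PROP_MAX_BUF_SIZE */

static void *
mirror_alloc_sketch(size_t user_size)
{
	if (user_size == 0 || user_size > PROP_MAX_SKETCH)
		return (NULL);		/* nothing sane to mirror */
	return (calloc(1, user_size));	/* zeroed, like kmem_zalloc() */
}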
6639 6641
6640 6642
6641 6643 /*
6642 6644 * Routine to free memory for the path information structures.
6643 6645 * This is the memory which was allocated earlier.
6644 6646 */
6645 6647 /* ARGSUSED */
6646 6648 static void
6647 6649 vhci_ioc_free_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6648 6650 uint_t num_paths)
6649 6651 {
6650 6652 sv_path_info_t *pi;
6651 6653 int index;
6652 6654
6653 6655 /* Free memory for per path properties */
6654 6656 for (index = 0, pi = kpibuf; index < num_paths; index++, pi++) {
6655 6657 if (pi->ret_prop.ret_buf_size != NULL) {
6656 6658 kmem_free(pi->ret_prop.ret_buf_size,
6657 6659 sizeof (*pi->ret_prop.ret_buf_size));
6658 6660 }
6659 6661
6660 6662 if (pi->ret_prop.buf != NULL) {
6661 6663 kmem_free(pi->ret_prop.buf, pi->ret_prop.buf_size);
6662 6664 }
6663 6665 }
6664 6666
6665 6667 /* Free memory for path info structures */
6666 6668 kmem_free(upibuf, sizeof (sv_path_info_t) * num_paths);
6667 6669 kmem_free(kpibuf, sizeof (sv_path_info_t) * num_paths);
6668 6670 }
6669 6671
6670 6672
6671 6673 /*
6672 6674 * Routine to copy path information and path properties to userland.
6673 6675 */
6674 6676 /* ARGSUSED */
6675 6677 static int
6676 6678 vhci_ioc_send_pathinfo(sv_path_info_t *upibuf, sv_path_info_t *kpibuf,
6677 6679 uint_t num_paths, sv_iocdata_t *pioc, int mode, caddr_t s)
6678 6680 {
6679 6681 int retval = 0, index;
6680 6682 sv_path_info_t *upi_ptr;
6681 6683 sv_path_info32_t *upi32_ptr;
6682 6684
6683 6685 #ifdef _MULTI_DATAMODEL
6684 6686 switch (ddi_model_convert_from(mode & FMODELS)) {
6685 6687 case DDI_MODEL_ILP32:
6686 6688 goto copy_32bit;
6687 6689
6688 6690 case DDI_MODEL_NONE:
6689 6691 goto copy_normal;
6690 6692 }
6691 6693 #else /* _MULTI_DATAMODEL */
6692 6694
6693 6695 goto copy_normal;
6694 6696
6695 6697 #endif /* _MULTI_DATAMODEL */
6696 6698
6697 6699 copy_normal:
6698 6700
6699 6701 /*
6700 6702  * Copy path information and path properties to userland.
6701 6703 * Pointer fields inside the path property structure were
6702 6704 * saved in the 'upibuf' structure earlier.
6703 6705 */
6704 6706 upi_ptr = pioc->ret_buf;
6705 6707 for (index = 0; index < num_paths; index++) {
6706 6708 if (ddi_copyout(kpibuf[index].device.ret_ct,
6707 6709 upi_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6708 6710 retval = EFAULT;
6709 6711 break;
6710 6712 }
6711 6713
6712 6714 if (ddi_copyout(kpibuf[index].ret_addr,
6713 6715 upi_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6714 6716 retval = EFAULT;
6715 6717 break;
6716 6718 }
6717 6719
6718 6720 if (ddi_copyout(&kpibuf[index].ret_state,
6719 6721 &upi_ptr[index].ret_state, sizeof (kpibuf[index].ret_state),
6720 6722 mode)) {
6721 6723 retval = EFAULT;
6722 6724 break;
6723 6725 }
6724 6726
6725 6727 if (ddi_copyout(&kpibuf[index].ret_ext_state,
6726 6728 &upi_ptr[index].ret_ext_state,
6727 6729 sizeof (kpibuf[index].ret_ext_state), mode)) {
6728 6730 retval = EFAULT;
6729 6731 break;
6730 6732 }
6731 6733
6732 6734 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6733 6735 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6734 6736 upibuf[index].ret_prop.ret_buf_size,
6735 6737 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6736 6738 retval = EFAULT;
6737 6739 break;
6738 6740 }
6739 6741
6740 6742 if ((kpibuf[index].ret_prop.buf != NULL) &&
6741 6743 ddi_copyout(kpibuf[index].ret_prop.buf,
6742 6744 upibuf[index].ret_prop.buf,
6743 6745 upibuf[index].ret_prop.buf_size, mode)) {
6744 6746 retval = EFAULT;
6745 6747 break;
6746 6748 }
6747 6749 }
6748 6750
6749 6751 #ifdef DEBUG
6750 6752 if (retval) {
6751 6753 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6752 6754 "normal: path_info copyout failed", s));
6753 6755 }
6754 6756 #endif
6755 6757
6756 6758 return (retval);
6757 6759
6758 6760 copy_32bit:
6759 6761 /*
6760 6762  * Copy path information and path properties to userland.
6761 6763 * Pointer fields inside the path property structure were
6762 6764 * saved in the 'upibuf' structure earlier.
6763 6765 */
6764 6766 upi32_ptr = (sv_path_info32_t *)pioc->ret_buf;
6765 6767 for (index = 0; index < num_paths; index++) {
6766 6768 if (ddi_copyout(kpibuf[index].device.ret_ct,
6767 6769 upi32_ptr[index].device.ret_ct, MAXPATHLEN, mode)) {
6768 6770 retval = EFAULT;
6769 6771 break;
6770 6772 }
6771 6773
6772 6774 if (ddi_copyout(kpibuf[index].ret_addr,
6773 6775 upi32_ptr[index].ret_addr, MAXNAMELEN, mode)) {
6774 6776 retval = EFAULT;
6775 6777 break;
6776 6778 }
6777 6779
6778 6780 if (ddi_copyout(&kpibuf[index].ret_state,
6779 6781 &upi32_ptr[index].ret_state,
6780 6782 sizeof (kpibuf[index].ret_state), mode)) {
6781 6783 retval = EFAULT;
6782 6784 break;
6783 6785 }
6784 6786
6785 6787 if (ddi_copyout(&kpibuf[index].ret_ext_state,
6786 6788 &upi32_ptr[index].ret_ext_state,
6787 6789 sizeof (kpibuf[index].ret_ext_state), mode)) {
6788 6790 retval = EFAULT;
6789 6791 break;
6790 6792 }
6791 6793 if ((kpibuf[index].ret_prop.ret_buf_size != NULL) &&
6792 6794 ddi_copyout(kpibuf[index].ret_prop.ret_buf_size,
6793 6795 upibuf[index].ret_prop.ret_buf_size,
6794 6796 sizeof (*upibuf[index].ret_prop.ret_buf_size), mode)) {
6795 6797 retval = EFAULT;
6796 6798 break;
6797 6799 }
6798 6800
6799 6801 if ((kpibuf[index].ret_prop.buf != NULL) &&
6800 6802 ddi_copyout(kpibuf[index].ret_prop.buf,
6801 6803 upibuf[index].ret_prop.buf,
6802 6804 upibuf[index].ret_prop.buf_size, mode)) {
6803 6805 retval = EFAULT;
6804 6806 break;
6805 6807 }
6806 6808 }
6807 6809
6808 6810 #ifdef DEBUG
6809 6811 if (retval) {
6810 6812 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_get_ioc: ioctl <%s> "
6811 6813 "normal: path_info copyout failed", s));
6812 6814 }
6813 6815 #endif
6814 6816
6815 6817 return (retval);
6816 6818 }
6817 6819
6818 6820
6819 6821 /*
6820 6822 * vhci_failover()
6821 6823 * This routine expects VHCI_HOLD_LUN before being invoked. It can be invoked
6822 6824  * as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC. For asynchronous failovers
6823 6825  * this routine shall VHCI_RELEASE_LUN on exiting. For synchronous failovers
6824 6826  * it is the caller's responsibility to release the lun.
6825 6827 */
6826 6828
6827 6829 /* ARGSUSED */
6828 6830 static int
6829 6831 vhci_failover(dev_info_t *vdip, dev_info_t *cdip, int flags)
6830 6832 {
6831 6833 char *guid;
6832 6834 scsi_vhci_lun_t *vlun = NULL;
6833 6835 struct scsi_vhci *vhci;
6834 6836 mdi_pathinfo_t *pip, *npip;
6835 6837 char *s_pclass, *pclass1, *pclass2, *pclass;
6836 6838 char active_pclass_copy[255], *active_pclass_ptr;
6837 6839 char *ptr1, *ptr2;
6838 6840 mdi_pathinfo_state_t pi_state;
6839 6841 uint32_t pi_ext_state;
6840 6842 scsi_vhci_priv_t *svp;
6841 6843 struct scsi_device *sd;
6842 6844 struct scsi_failover_ops *sfo;
6843 6845 int sps; /* mdi_select_path() status */
6844 6846 int activation_done = 0;
6845 6847 int rval, retval = MDI_FAILURE;
6846 6848 int reserve_pending, check_condition, UA_condition;
6847 6849 struct scsi_pkt *pkt;
6848 6850 struct buf *bp;
6849 6851
6850 6852 vhci = ddi_get_soft_state(vhci_softstate, ddi_get_instance(vdip));
6851 6853 sd = ddi_get_driver_private(cdip);
6852 6854 vlun = ADDR2VLUN(&sd->sd_address);
6853 6855 ASSERT(vlun != 0);
6854 6856 ASSERT(VHCI_LUN_IS_HELD(vlun));
6855 6857 guid = vlun->svl_lun_wwn;
6856 6858 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(1): guid %s\n", guid));
6857 6859 vhci_log(CE_NOTE, vdip, "!Initiating failover for device %s "
6858 6860 "(GUID %s)", ddi_node_name(cdip), guid);
6859 6861
6860 6862 /*
6861 6863  * Let's maintain a local copy of the vlun->svl_active_pclass
6862 6864  * for the rest of the processing. Accessing the field
6863 6865  * directly in the loop below breaks the loop logic,
6864 6866  * especially when the field gets updated by other threads
6865 6867  * (updating path status, etc.) and causes the 'paths are not
6866 6868  * currently available' condition to be declared prematurely.
6867 6869 */
6868 6870 mutex_enter(&vlun->svl_mutex);
6869 6871 if (vlun->svl_active_pclass != NULL) {
6870 6872 (void) strlcpy(active_pclass_copy, vlun->svl_active_pclass,
6871 6873 sizeof (active_pclass_copy));
6872 6874 active_pclass_ptr = &active_pclass_copy[0];
6873 6875 mutex_exit(&vlun->svl_mutex);
6874 6876 if (vhci_quiesce_paths(vdip, cdip, vlun, guid,
6875 6877 active_pclass_ptr) != 0) {
6876 6878 retval = MDI_FAILURE;
6877 6879 }
6878 6880 } else {
6879 6881 /*
6880 6882  * This can happen only when the available path discovered
6881 6883  * to the device is a STANDBY path.
6882 6884 */
6883 6885 mutex_exit(&vlun->svl_mutex);
6884 6886 active_pclass_copy[0] = '\0';
6885 6887 active_pclass_ptr = NULL;
6886 6888 }
6887 6889
6888 6890 sfo = vlun->svl_fops;
6889 6891 ASSERT(sfo != NULL);
6890 6892 pclass1 = s_pclass = active_pclass_ptr;
6891 6893 VHCI_DEBUG(1, (CE_NOTE, NULL, "!(%s)failing over from %s\n", guid,
6892 6894 (s_pclass == NULL ? "<none>" : s_pclass)));
6893 6895
6894 6896 next_pathclass:
6895 6897
6896 6898 rval = sfo->sfo_pathclass_next(pclass1, &pclass2,
6897 6899 vlun->svl_fops_ctpriv);
6898 6900 if (rval == ENOENT) {
6899 6901 if (s_pclass == NULL) {
6900 6902 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(4)(%s): "
6901 6903 "failed, no more pathclasses\n", guid));
6902 6904 goto done;
6903 6905 } else {
6904 6906 (void) sfo->sfo_pathclass_next(NULL, &pclass2,
6905 6907 vlun->svl_fops_ctpriv);
6906 6908 }
6907 6909 } else if (rval == EINVAL) {
6908 6910 vhci_log(CE_NOTE, vdip, "!Failover operation failed for "
6909 6911 "device %s (GUID %s): Invalid path-class %s",
6910 6912 ddi_node_name(cdip), guid,
6911 6913 ((pclass1 == NULL) ? "<none>" : pclass1));
6912 6914 goto done;
6913 6915 }
6914 6916 if ((s_pclass != NULL) && (strcmp(pclass2, s_pclass) == 0)) {
6915 6917 /*
6916 6918 * paths are not currently available
6917 6919 */
6918 6920 vhci_log(CE_NOTE, vdip, "!Failover path currently unavailable"
6919 6921 " for device %s (GUID %s)",
6920 6922 ddi_node_name(cdip), guid);
6921 6923 goto done;
6922 6924 }
6923 6925 pip = npip = NULL;
6924 6926 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(5.2)(%s): considering "
6925 6927 "%s as failover destination\n", guid, pclass2));
6926 6928 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH, NULL, &npip);
6927 6929 if ((npip == NULL) || (sps != MDI_SUCCESS)) {
6928 6930 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(%s): no "
6929 6931 "STANDBY paths found (status:%x)!\n", guid, sps));
6930 6932 pclass1 = pclass2;
6931 6933 goto next_pathclass;
6932 6934 }
6933 6935 do {
6934 6936 pclass = NULL;
6935 6937 if ((mdi_prop_lookup_string(npip, "path-class",
6936 6938 &pclass) != MDI_SUCCESS) || (strcmp(pclass2,
6937 6939 pclass) != 0)) {
6938 6940 VHCI_DEBUG(1, (CE_NOTE, NULL,
6939 6941 "!vhci_failover(5.5)(%s): skipping path "
6940 6942 "%p(%s)...\n", guid, (void *)npip, pclass));
6941 6943 pip = npip;
6942 6944 sps = mdi_select_path(cdip, NULL,
6943 6945 MDI_SELECT_STANDBY_PATH, pip, &npip);
6944 6946 mdi_rele_path(pip);
6945 6947 (void) mdi_prop_free(pclass);
6946 6948 continue;
6947 6949 }
6948 6950 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
6949 6951
6950 6952 /*
6951 6953  * Issue a READ at a non-zero block on this STANDBY path.
6952 6954  * Purple returns
6953 6955  * 1. RESERVATION_CONFLICT if a reservation is pending
6954 6956  * 2. a POR check condition if a reset happened
6955 6957  * 3. failover check conditions if one is already in progress
6956 6958 */
6957 6959 reserve_pending = 0;
6958 6960 check_condition = 0;
6959 6961 UA_condition = 0;
6960 6962
6961 6963 bp = scsi_alloc_consistent_buf(&svp->svp_psd->sd_address,
6962 6964 (struct buf *)NULL, DEV_BSIZE, B_READ, NULL, NULL);
6963 6965 if (!bp) {
6964 6966 VHCI_DEBUG(1, (CE_NOTE, NULL,
6965 6967 "vhci_failover !No resources (buf)\n"));
6966 6968 mdi_rele_path(npip);
6967 6969 goto done;
6968 6970 }
6969 6971 pkt = scsi_init_pkt(&svp->svp_psd->sd_address, NULL, bp,
6970 6972 CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
6971 6973 PKT_CONSISTENT, NULL, NULL);
6972 6974 if (pkt) {
6973 6975 (void) scsi_setup_cdb((union scsi_cdb *)(uintptr_t)
6974 6976 pkt->pkt_cdbp, SCMD_READ, 1, 1, 0);
6975 6977 pkt->pkt_flags = FLAG_NOINTR;
6976 6978 check_path_again:
6977 6979 pkt->pkt_path_instance = mdi_pi_get_path_instance(npip);
6978 6980 pkt->pkt_time = 3*30;
6979 6981
6980 6982 if (scsi_transport(pkt) == TRAN_ACCEPT) {
6981 6983 switch (pkt->pkt_reason) {
6982 6984 case CMD_CMPLT:
6983 6985 switch (SCBP_C(pkt)) {
6984 6986 case STATUS_GOOD:
6985 6987 /* Already failed over */
6986 6988 activation_done = 1;
6987 6989 break;
6988 6990 case STATUS_RESERVATION_CONFLICT:
6989 6991 reserve_pending = 1;
6990 6992 break;
6991 6993 case STATUS_CHECK:
6992 6994 check_condition = 1;
6993 6995 break;
6994 6996 }
6995 6997 }
6996 6998 }
6997 6999 if (check_condition &&
6998 7000 (pkt->pkt_state & STATE_ARQ_DONE)) {
6999 7001 uint8_t *sns, skey, asc, ascq;
7000 7002 sns = (uint8_t *)
7001 7003 &(((struct scsi_arq_status *)(uintptr_t)
7002 7004 (pkt->pkt_scbp))->sts_sensedata);
7003 7005 skey = scsi_sense_key(sns);
7004 7006 asc = scsi_sense_asc(sns);
7005 7007 ascq = scsi_sense_ascq(sns);
7006 7008 if (skey == KEY_UNIT_ATTENTION &&
7007 7009 asc == 0x29) {
7008 7010 /* Already failed over */
7009 7011 VHCI_DEBUG(1, (CE_NOTE, NULL,
7010 7012 "!vhci_failover(7)(%s): "
7011 7013 "path 0x%p POR UA condition\n",
7012 7014 guid, (void *)npip));
7013 7015 if (UA_condition == 0) {
7014 7016 UA_condition = 1;
7015 7017 goto check_path_again;
7016 7018 }
7017 7019 } else {
7018 7020 activation_done = 0;
7019 7021 VHCI_DEBUG(1, (CE_NOTE, NULL,
7020 7022 "!vhci_failover(%s): path 0x%p "
7021 7023 "unhandled chkcond %x %x %x\n",
7022 7024 guid, (void *)npip, skey,
7023 7025 asc, ascq));
7024 7026 }
7025 7027 }
7026 7028 scsi_destroy_pkt(pkt);
7027 7029 }
7028 7030 scsi_free_consistent_buf(bp);
7029 7031
7030 7032 if (activation_done) {
7031 7033 mdi_rele_path(npip);
7032 7034 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
7033 7035 "path 0x%p already failedover\n", guid,
7034 7036 (void *)npip));
7035 7037 break;
7036 7038 }
7037 7039 if (reserve_pending && (vlun->svl_xlf_capable == 0)) {
7038 7040 (void) vhci_recovery_reset(vlun,
7039 7041 &svp->svp_psd->sd_address,
7040 7042 FALSE, VHCI_DEPTH_ALL);
7041 7043 }
7042 7044 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(6)(%s): "
7043 7045 "activating path 0x%p(psd:%p)\n", guid, (void *)npip,
7044 7046 (void *)svp->svp_psd));
7045 7047 if (sfo->sfo_path_activate(svp->svp_psd, pclass2,
7046 7048 vlun->svl_fops_ctpriv) == 0) {
7047 7049 activation_done = 1;
7048 7050 mdi_rele_path(npip);
7049 7051 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(7)(%s): "
7050 7052 "path 0x%p successfully activated\n", guid,
7051 7053 (void *)npip));
7052 7054 break;
7053 7055 }
7054 7056 pip = npip;
7055 7057 sps = mdi_select_path(cdip, NULL, MDI_SELECT_STANDBY_PATH,
7056 7058 pip, &npip);
7057 7059 mdi_rele_path(pip);
7058 7060 } while ((npip != NULL) && (sps == MDI_SUCCESS));
7059 7061 if (activation_done == 0) {
7060 7062 pclass1 = pclass2;
7061 7063 goto next_pathclass;
7062 7064 }
7063 7065
7064 7066 /*
7065 7067  * If we are here, we have succeeded in activating path npip of
7066 7068  * pathclass pclass2; validate all paths of pclass2 by
7067 7069  * "ping"-ing each one and mark the good ones ONLINE.
7068 7070  * Also, set the state of the paths belonging to the previously
7069 7071  * active pathclass to STANDBY.
7070 7072 */
7071 7073 pip = npip = NULL;
7072 7074 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
7073 7075 MDI_SELECT_STANDBY_PATH | MDI_SELECT_USER_DISABLE_PATH),
7074 7076 NULL, &npip);
7075 7077 if (npip == NULL || sps != MDI_SUCCESS) {
7076 7078 VHCI_DEBUG(1, (CE_NOTE, NULL, "!Failover operation failed for "
7077 7079 "device %s (GUID %s): paths may be busy\n",
7078 7080 ddi_node_name(cdip), guid));
7079 7081 goto done;
7080 7082 }
7081 7083 do {
7082 7084 (void) mdi_pi_get_state2(npip, &pi_state, &pi_ext_state);
7083 7085 if (mdi_prop_lookup_string(npip, "path-class", &pclass)
7084 7086 != MDI_SUCCESS) {
7085 7087 pip = npip;
7086 7088 sps = mdi_select_path(cdip, NULL,
7087 7089 (MDI_SELECT_ONLINE_PATH |
7088 7090 MDI_SELECT_STANDBY_PATH |
7089 7091 MDI_SELECT_USER_DISABLE_PATH),
7090 7092 pip, &npip);
7091 7093 mdi_rele_path(pip);
7092 7094 continue;
7093 7095 }
7094 7096 if (strcmp(pclass, pclass2) == 0) {
7095 7097 if (pi_state == MDI_PATHINFO_STATE_STANDBY) {
7096 7098 svp = (scsi_vhci_priv_t *)
7097 7099 mdi_pi_get_vhci_private(npip);
7098 7100 VHCI_DEBUG(1, (CE_NOTE, NULL,
7099 7101 "!vhci_failover(8)(%s): "
7100 7102 "pinging path 0x%p\n",
7101 7103 guid, (void *)npip));
7102 7104 if (sfo->sfo_path_ping(svp->svp_psd,
7103 7105 vlun->svl_fops_ctpriv) == 1) {
7104 7106 mdi_pi_set_state(npip,
7105 7107 MDI_PATHINFO_STATE_ONLINE);
7106 7108 VHCI_DEBUG(1, (CE_NOTE, NULL,
7107 7109 "!vhci_failover(9)(%s): "
7108 7110 "path 0x%p ping successful, "
7109 7111 "marked online\n", guid,
7110 7112 (void *)npip));
7111 7113 MDI_PI_ERRSTAT(npip, MDI_PI_FAILTO);
7112 7114 }
7113 7115 }
7114 7116 } else if ((s_pclass != NULL) && (strcmp(pclass, s_pclass)
7115 7117 == 0)) {
7116 7118 if (pi_state == MDI_PATHINFO_STATE_ONLINE) {
7117 7119 mdi_pi_set_state(npip,
7118 7120 MDI_PATHINFO_STATE_STANDBY);
7119 7121 VHCI_DEBUG(1, (CE_NOTE, NULL,
7120 7122 "!vhci_failover(10)(%s): path 0x%p marked "
7121 7123 "STANDBY\n", guid, (void *)npip));
7122 7124 MDI_PI_ERRSTAT(npip, MDI_PI_FAILFROM);
7123 7125 }
7124 7126 }
7125 7127 (void) mdi_prop_free(pclass);
7126 7128 pip = npip;
7127 7129 sps = mdi_select_path(cdip, NULL, (MDI_SELECT_ONLINE_PATH |
7128 7130 MDI_SELECT_STANDBY_PATH|MDI_SELECT_USER_DISABLE_PATH),
7129 7131 pip, &npip);
7130 7132 mdi_rele_path(pip);
7131 7133 } while ((npip != NULL) && (sps == MDI_SUCCESS));
7132 7134
7133 7135 /*
7134 7136 * Update the AccessState of related MP-API TPGs
7135 7137 */
7136 7138 (void) vhci_mpapi_update_tpg_acc_state_for_lu(vhci, vlun);
7137 7139
7138 7140 vhci_log(CE_NOTE, vdip, "!Failover operation completed successfully "
7139 7141 "for device %s (GUID %s): failed over from %s to %s",
7140 7142 ddi_node_name(cdip), guid, ((s_pclass == NULL) ? "<none>" :
7141 7143 s_pclass), pclass2);
7142 7144 ptr1 = kmem_alloc(strlen(pclass2)+1, KM_SLEEP);
7143 7145 (void) strlcpy(ptr1, pclass2, (strlen(pclass2)+1));
7144 7146 mutex_enter(&vlun->svl_mutex);
7145 7147 ptr2 = vlun->svl_active_pclass;
7146 7148 vlun->svl_active_pclass = ptr1;
7147 7149 mutex_exit(&vlun->svl_mutex);
7148 7150 if (ptr2) {
7149 7151 kmem_free(ptr2, strlen(ptr2)+1);
7150 7152 }
7151 7153 mutex_enter(&vhci->vhci_mutex);
7152 7154 scsi_hba_reset_notify_callback(&vhci->vhci_mutex,
7153 7155 &vhci->vhci_reset_notify_listf);
7154 7156 /* All reservations are cleared upon these resets. */
7155 7157 vlun->svl_flags &= ~VLUN_RESERVE_ACTIVE_FLG;
7156 7158 mutex_exit(&vhci->vhci_mutex);
7157 7159 VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_failover(11): DONE! Active "
7158 7160 "pathclass for %s is now %s\n", guid, pclass2));
7159 7161 retval = MDI_SUCCESS;
7160 7162
7161 7163 done:
7162 7164 vlun->svl_failover_status = retval;
7163 7165 if (flags == MDI_FAILOVER_ASYNC) {
7164 7166 VHCI_RELEASE_LUN(vlun);
7165 7167 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
7166 7168 "releasing lun, as failover was ASYNC\n"));
7167 7169 } else {
7168 7170 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_failover(12): DONE! "
7169 7171 "NOT releasing lun, as failover was SYNC\n"));
7170 7172 }
7171 7173 return (retval);
7172 7174 }
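
/*
 * Editor's note: the sense-data test from the probe READ above, as a
 * self-contained sketch. Sense key 0x06 is the standard UNIT ATTENTION
 * and ASC 0x29 is "power on, reset, or bus device reset occurred"; the
 * function name is hypothetical.
 */
#include <stdint.h>

static int
is_por_unit_attention_sketch(uint8_t skey, uint8_t asc)
{
	/* a POR unit attention means the path has already failed over */
	return (skey == 0x06 && asc == 0x29);
}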
7173 7175
7174 7176 /*
7175 7177 * vhci_client_attached is called after the successful attach of a
7176 7178 * client devinfo node.
7177 7179 */
7178 7180 static void
7179 7181 vhci_client_attached(dev_info_t *cdip)
7180 7182 {
7181 7183 mdi_pathinfo_t *pip;
7182 7184 int circular;
7183 7185
7184 7186 /*
7185 7187  * At this point the client has attached and its instance number is
7186 7188 * valid, so we can set up kstats. We need to do this here because it
7187 7189 * is possible for paths to go online prior to client attach, in which
7188 7190 * case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online
7189 7191 * was a noop.
7190 7192 */
7191 7193 ndi_devi_enter(cdip, &circular);
7192 7194 for (pip = mdi_get_next_phci_path(cdip, NULL); pip;
7193 7195 pip = mdi_get_next_phci_path(cdip, pip))
7194 7196 vhci_kstat_create_pathinfo(pip);
7195 7197 ndi_devi_exit(cdip, circular);
7196 7198 }
7197 7199
7198 7200 /*
7199 7201 * quiesce all of the online paths
7200 7202 */
7201 7203 static int
7202 7204 vhci_quiesce_paths(dev_info_t *vdip, dev_info_t *cdip, scsi_vhci_lun_t *vlun,
7203 7205 char *guid, char *active_pclass_ptr)
7204 7206 {
7205 7207 scsi_vhci_priv_t *svp;
7206 7208 char *s_pclass = NULL;
7207 7209 mdi_pathinfo_t *npip, *pip;
7208 7210 int sps;
7209 7211
7210 7212 /* quiesce currently active paths */
7211 7213 s_pclass = NULL;
7212 7214 pip = npip = NULL;
7213 7215 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH, NULL, &npip);
7214 7216 if ((npip == NULL) || (sps != MDI_SUCCESS)) {
7215 7217 return (1);
7216 7218 }
7217 7219 do {
7218 7220 if (mdi_prop_lookup_string(npip, "path-class",
7219 7221 &s_pclass) != MDI_SUCCESS) {
7220 7222 mdi_rele_path(npip);
7221 7223 vhci_log(CE_NOTE, vdip, "!Failover operation failed "
7222 7224 "for device %s (GUID %s) due to an internal "
7223 7225 "error", ddi_node_name(cdip), guid);
7224 7226 return (1);
7225 7227 }
7226 7228 if (strcmp(s_pclass, active_pclass_ptr) == 0) {
7227 7229 /*
7228 7230  * Quiesce the path. Free s_pclass since
7229 7231  * we don't need it anymore.
7230 7232 */
7231 7233 VHCI_DEBUG(1, (CE_NOTE, NULL,
7232 7234 "!vhci_failover(2)(%s): failing over "
7233 7235 "from %s; quiescing path %p\n",
7234 7236 guid, s_pclass, (void *)npip));
7235 7237 (void) mdi_prop_free(s_pclass);
7236 7238 svp = (scsi_vhci_priv_t *)
7237 7239 mdi_pi_get_vhci_private(npip);
7238 7240 if (svp == NULL) {
7239 7241 VHCI_DEBUG(1, (CE_NOTE, NULL,
7240 7242 "!vhci_failover(2.5)(%s): no "
7241 7243 "client priv! %p offlined?\n",
7242 7244 guid, (void *)npip));
7243 7245 pip = npip;
7244 7246 sps = mdi_select_path(cdip, NULL,
7245 7247 MDI_SELECT_ONLINE_PATH, pip, &npip);
7246 7248 mdi_rele_path(pip);
7247 7249 continue;
7248 7250 }
7249 7251 if (scsi_abort(&svp->svp_psd->sd_address, NULL)
7250 7252 == 0) {
7251 7253 (void) vhci_recovery_reset(vlun,
7252 7254 &svp->svp_psd->sd_address, FALSE,
7253 7255 VHCI_DEPTH_TARGET);
7254 7256 }
7255 7257 mutex_enter(&svp->svp_mutex);
7256 7258 if (svp->svp_cmds == 0) {
7257 7259 VHCI_DEBUG(1, (CE_NOTE, NULL,
7258 7260 "!vhci_failover(3)(%s):"
7259 7261 "quiesced path %p\n", guid, (void *)npip));
7260 7262 } else {
7261 7263 while (svp->svp_cmds != 0) {
7262 7264 cv_wait(&svp->svp_cv, &svp->svp_mutex);
7263 7265 VHCI_DEBUG(1, (CE_NOTE, NULL,
7264 7266 "!vhci_failover(3.cv)(%s):"
7265 7267 "quiesced path %p\n", guid,
7266 7268 (void *)npip));
7267 7269 }
7268 7270 }
7269 7271 mutex_exit(&svp->svp_mutex);
7270 7272 } else {
7271 7273 /*
7272 7274  * make sure we free up the memory
7273 7275 */
7274 7276 (void) mdi_prop_free(s_pclass);
7275 7277 }
7276 7278 pip = npip;
7277 7279 sps = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH,
7278 7280 pip, &npip);
7279 7281 mdi_rele_path(pip);
7280 7282 } while ((npip != NULL) && (sps == MDI_SUCCESS));
7281 7283 return (0);
7282 7284 }
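
/*
 * Editor's note: the quiesce idiom above, reduced to a compilable
 * pthread sketch: after aborting/resetting, block until the per-path
 * outstanding-command count drains to zero. All names are hypothetical;
 * cv_wait() and svp_cmds in the driver play the roles shown here.
 */
#include <pthread.h>

struct path_sketch {
	pthread_mutex_t	lock;
	pthread_cond_t	cv;
	int		cmds;	/* outstanding commands on this path */
};

static void
drain_sketch(struct path_sketch *p)
{
	pthread_mutex_lock(&p->lock);
	while (p->cmds != 0)		/* completion path signals cv */
		pthread_cond_wait(&p->cv, &p->lock);
	pthread_mutex_unlock(&p->lock);
}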
7283 7285
7284 7286 static struct scsi_vhci_lun *
7285 7287 vhci_lun_lookup(dev_info_t *tgt_dip)
7286 7288 {
7287 7289 return ((struct scsi_vhci_lun *)
7288 7290 mdi_client_get_vhci_private(tgt_dip));
7289 7291 }
7290 7292
7291 7293 static struct scsi_vhci_lun *
7292 7294 vhci_lun_lookup_alloc(dev_info_t *tgt_dip, char *guid, int *didalloc)
7293 7295 {
7294 7296 struct scsi_vhci_lun *svl;
7295 7297
7296 7298 if (svl = vhci_lun_lookup(tgt_dip)) {
7297 7299 return (svl);
7298 7300 }
7299 7301
7300 7302 svl = kmem_zalloc(sizeof (*svl), KM_SLEEP);
7301 7303 svl->svl_lun_wwn = kmem_zalloc(strlen(guid)+1, KM_SLEEP);
7302 7304 (void) strcpy(svl->svl_lun_wwn, guid);
7303 7305 mutex_init(&svl->svl_mutex, NULL, MUTEX_DRIVER, NULL);
7304 7306 cv_init(&svl->svl_cv, NULL, CV_DRIVER, NULL);
7305 7307 sema_init(&svl->svl_pgr_sema, 1, NULL, SEMA_DRIVER, NULL);
7306 7308 svl->svl_waiting_for_activepath = 1;
7307 7309 svl->svl_sector_size = 1;
7308 7310 mdi_client_set_vhci_private(tgt_dip, svl);
7309 7311 *didalloc = 1;
7310 7312 VHCI_DEBUG(1, (CE_NOTE, NULL,
7311 7313 "vhci_lun_lookup_alloc: guid %s vlun 0x%p\n",
7312 7314 guid, (void *)svl));
7313 7315 return (svl);
7314 7316 }
7315 7317
7316 7318 static void
7317 7319 vhci_lun_free(struct scsi_vhci_lun *dvlp, struct scsi_device *sd)
7318 7320 {
7319 7321 char *guid;
7320 7322
7321 7323 guid = dvlp->svl_lun_wwn;
7322 7324 ASSERT(guid != NULL);
7323 7325 VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_lun_free: %s\n", guid));
7324 7326
7325 7327 mutex_enter(&dvlp->svl_mutex);
7326 7328 if (dvlp->svl_active_pclass != NULL) {
7327 7329 kmem_free(dvlp->svl_active_pclass,
7328 7330 strlen(dvlp->svl_active_pclass)+1);
7329 7331 }
7330 7332 dvlp->svl_active_pclass = NULL;
7331 7333 mutex_exit(&dvlp->svl_mutex);
7332 7334
7333 7335 if (dvlp->svl_lun_wwn != NULL) {
7334 7336 kmem_free(dvlp->svl_lun_wwn, strlen(dvlp->svl_lun_wwn)+1);
7335 7337 }
7336 7338 dvlp->svl_lun_wwn = NULL;
7337 7339
7338 7340 if (dvlp->svl_fops_name) {
7339 7341 kmem_free(dvlp->svl_fops_name, strlen(dvlp->svl_fops_name)+1);
7340 7342 }
7341 7343 dvlp->svl_fops_name = NULL;
7342 7344
7343 7345 if (dvlp->svl_fops_ctpriv != NULL &&
7344 7346 dvlp->svl_fops != NULL) {
7345 7347 dvlp->svl_fops->sfo_device_unprobe(sd, dvlp->svl_fops_ctpriv);
7346 7348 }
7347 7349
7348 7350 if (dvlp->svl_flags & VLUN_TASK_D_ALIVE_FLG)
7349 7351 taskq_destroy(dvlp->svl_taskq);
7350 7352
7351 7353 mutex_destroy(&dvlp->svl_mutex);
7352 7354 cv_destroy(&dvlp->svl_cv);
7353 7355 sema_destroy(&dvlp->svl_pgr_sema);
7354 7356 kmem_free(dvlp, sizeof (*dvlp));
7355 7357 /*
7356 7358 * vhci_lun_free may be called before the tgt_dip
7357 7359 * initialization so check if the sd is NULL.
7358 7360 */
7359 7361 if (sd != NULL)
7360 7362 scsi_device_hba_private_set(sd, NULL);
7361 7363 }
7362 7364
7363 7365 int
7364 7366 vhci_do_scsi_cmd(struct scsi_pkt *pkt)
7365 7367 {
7366 7368 int err = 0;
7367 7369 int retry_cnt = 0;
7368 7370 uint8_t *sns, skey;
7369 7371
7370 7372 #ifdef DEBUG
7371 7373 if (vhci_debug > 5) {
7372 7374 vhci_print_cdb(pkt->pkt_address.a_hba_tran->tran_hba_dip,
7373 7375 CE_WARN, "Vhci command", pkt->pkt_cdbp);
7374 7376 }
7375 7377 #endif
7376 7378
7377 7379 retry:
7378 7380 err = scsi_poll(pkt);
7379 7381 if (err) {
7380 7382 if (pkt->pkt_cdbp[0] == SCMD_RELEASE) {
7381 7383 if (SCBP_C(pkt) == STATUS_RESERVATION_CONFLICT) {
7382 7384 VHCI_DEBUG(1, (CE_NOTE, NULL,
7383 7385 "!v_s_do_s_c: RELEASE conflict\n"));
7384 7386 return (0);
7385 7387 }
7386 7388 }
7387 7389 if (retry_cnt++ < 6) {
7388 7390 VHCI_DEBUG(1, (CE_WARN, NULL,
7389 7391 "!v_s_do_s_c:retry packet 0x%p "
7390 7392 "status 0x%x reason %s",
7391 7393 (void *)pkt, SCBP_C(pkt),
7392 7394 scsi_rname(pkt->pkt_reason)));
7393 7395 if ((pkt->pkt_reason == CMD_CMPLT) &&
7394 7396 (SCBP_C(pkt) == STATUS_CHECK) &&
7395 7397 (pkt->pkt_state & STATE_ARQ_DONE)) {
7396 7398 sns = (uint8_t *)
7397 7399 &(((struct scsi_arq_status *)(uintptr_t)
7398 7400 (pkt->pkt_scbp))->sts_sensedata);
7399 7401 skey = scsi_sense_key(sns);
7400 7402 VHCI_DEBUG(1, (CE_WARN, NULL,
7401 7403 "!v_s_do_s_c:retry "
7402 7404 "packet 0x%p sense data %s", (void *)pkt,
7403 7405 scsi_sname(skey)));
7404 7406 }
7405 7407 goto retry;
7406 7408 }
7407 7409 VHCI_DEBUG(1, (CE_WARN, NULL,
7408 7410 "!v_s_do_s_c: failed transport 0x%p 0x%x",
7409 7411 (void *)pkt, SCBP_C(pkt)));
7410 7412 return (0);
7411 7413 }
7412 7414
7413 7415 switch (pkt->pkt_reason) {
7414 7416 case CMD_TIMEOUT:
7415 7417 VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt timed "
7416 7418 "out (pkt 0x%p)", (void *)pkt));
7417 7419 return (0);
7418 7420 case CMD_CMPLT:
7419 7421 switch (SCBP_C(pkt)) {
7420 7422 case STATUS_GOOD:
7421 7423 break;
7422 7424 case STATUS_CHECK:
7423 7425 if (pkt->pkt_state & STATE_ARQ_DONE) {
7424 7426 sns = (uint8_t *)&(((
7425 7427 struct scsi_arq_status *)
7426 7428 (uintptr_t)
7427 7429 (pkt->pkt_scbp))->
7428 7430 sts_sensedata);
7429 7431 skey = scsi_sense_key(sns);
7430 7432 if ((skey ==
7431 7433 KEY_UNIT_ATTENTION) ||
7432 7434 (skey ==
7433 7435 KEY_NOT_READY)) {
7434 7436 /*
7435 7437 * clear unit attn.
7436 7438 */
7437 7439
7438 7440 VHCI_DEBUG(1,
7439 7441 (CE_WARN, NULL,
7440 7442 "!v_s_do_s_c: "
7441 7443 "retry "
7442 7444 "packet 0x%p sense "
7443 7445 "data %s",
7444 7446 (void *)pkt,
7445 7447 scsi_sname
7446 7448 (skey)));
7447 7449 goto retry;
7448 7450 }
7449 7451 VHCI_DEBUG(4, (CE_WARN, NULL,
7450 7452 "!ARQ while "
7451 7453 "transporting "
7452 7454 "(pkt 0x%p)",
7453 7455 (void *)pkt));
7454 7456 return (0);
7455 7457 }
7456 7458 return (0);
7457 7459 default:
7458 7460 VHCI_DEBUG(1, (CE_WARN, NULL,
7459 7461 "!Bad status returned "
7460 7462 "(pkt 0x%p, status %x)",
7461 7463 (void *)pkt, SCBP_C(pkt)));
7462 7464 return (0);
7463 7465 }
7464 7466 break;
7465 7467 case CMD_INCOMPLETE:
7466 7468 case CMD_RESET:
7467 7469 case CMD_ABORTED:
7468 7470 case CMD_TRAN_ERR:
7469 7471 if (retry_cnt++ < 1) {
7470 7472 VHCI_DEBUG(1, (CE_WARN, NULL,
7471 7473 "!v_s_do_s_c: retry packet 0x%p %s",
7472 7474 (void *)pkt, scsi_rname(pkt->pkt_reason)));
7473 7475 goto retry;
7474 7476 }
7475 7477 /* FALLTHROUGH */
7476 7478 default:
7477 7479 VHCI_DEBUG(1, (CE_WARN, NULL, "!pkt did not "
7478 7480 "complete successfully (pkt 0x%p,"
7479 7481 "reason %x)", (void *)pkt, pkt->pkt_reason));
7480 7482 return (0);
7481 7483 }
7482 7484 return (1);
7483 7485 }
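
/*
 * Editor's note: the ARQ retry policy above, as a compilable
 * classifier. 0x02 (NOT READY) and 0x06 (UNIT ATTENTION) are the
 * standard SPC sense keys the routine retries on; the function name
 * is hypothetical.
 */
#include <stdint.h>

static int
sense_is_retryable_sketch(uint8_t skey)
{
	/* unit attentions are cleared and not-ready states re-polled */
	return (skey == 0x06 || skey == 0x02);
}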
7484 7486
7485 7487 static int
7486 7488 vhci_quiesce_lun(struct scsi_vhci_lun *vlun)
7487 7489 {
7488 7490 mdi_pathinfo_t *pip, *spip;
7489 7491 dev_info_t *cdip;
7490 7492 struct scsi_vhci_priv *svp;
7491 7493 mdi_pathinfo_state_t pstate;
7492 7494 uint32_t p_ext_state;
7493 7495 int circular;
7494 7496
7495 7497 cdip = vlun->svl_dip;
7496 7498 pip = spip = NULL;
7497 7499 ndi_devi_enter(cdip, &circular);
7498 7500 pip = mdi_get_next_phci_path(cdip, NULL);
7499 7501 while (pip != NULL) {
7500 7502 (void) mdi_pi_get_state2(pip, &pstate, &p_ext_state);
7501 7503 if (pstate != MDI_PATHINFO_STATE_ONLINE) {
7502 7504 spip = pip;
7503 7505 pip = mdi_get_next_phci_path(cdip, spip);
7504 7506 continue;
7505 7507 }
7506 7508 mdi_hold_path(pip);
7507 7509 ndi_devi_exit(cdip, circular);
7508 7510 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
7509 7511 mutex_enter(&svp->svp_mutex);
7510 7512 while (svp->svp_cmds != 0) {
7511 7513 if (cv_reltimedwait(&svp->svp_cv, &svp->svp_mutex,
7512 7514 drv_usectohz(vhci_path_quiesce_timeout * 1000000),
7513 7515 TR_CLOCK_TICK) == -1) {
7514 7516 mutex_exit(&svp->svp_mutex);
7515 7517 mdi_rele_path(pip);
7516 7518 VHCI_DEBUG(1, (CE_WARN, NULL,
7517 7519 "Quiesce of lun is not successful "
7518 7520 "vlun: 0x%p.", (void *)vlun));
7519 7521 return (0);
7520 7522 }
7521 7523 }
7522 7524 mutex_exit(&svp->svp_mutex);
7523 7525 ndi_devi_enter(cdip, &circular);
7524 7526 spip = pip;
7525 7527 pip = mdi_get_next_phci_path(cdip, spip);
7526 7528 mdi_rele_path(spip);
7527 7529 }
7528 7530 ndi_devi_exit(cdip, circular);
7529 7531 return (1);
7530 7532 }
7531 7533
7532 7534 static int
7533 7535 vhci_pgr_validate_and_register(scsi_vhci_priv_t *svp)
7534 7536 {
7535 7537 scsi_vhci_lun_t *vlun;
7536 7538 vhci_prout_t *prout;
7537 7539 int rval, success;
7538 7540 mdi_pathinfo_t *pip, *npip;
7539 7541 scsi_vhci_priv_t *osvp;
7540 7542 dev_info_t *cdip;
7541 7543 uchar_t cdb_1;
7542 7544 uchar_t temp_res_key[MHIOC_RESV_KEY_SIZE];
7543 7545
7544 7546
7545 7547 /*
7546 7548 * see if there are any other paths available; if none,
7547 7549 * then there is nothing to do.
7548 7550 */
7549 7551 cdip = svp->svp_svl->svl_dip;
7550 7552 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7551 7553 MDI_SELECT_STANDBY_PATH, NULL, &pip);
7552 7554 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7553 7555 VHCI_DEBUG(4, (CE_NOTE, NULL,
7554 7556 "%s%d: vhci_pgr_validate_and_register: first path\n",
7555 7557 ddi_driver_name(cdip), ddi_get_instance(cdip)));
7556 7558 return (1);
7557 7559 }
7558 7560
7559 7561 vlun = svp->svp_svl;
7560 7562 prout = &vlun->svl_prout;
7561 7563 ASSERT(vlun->svl_pgr_active != 0);
7562 7564
7563 7565 /*
7564 7566 * When the path was busy/offlined, some other host might have
7565 7567 * cleared this key. Validate key on some other path first.
7566 7568 * If it fails, return failure.
7567 7569 */
7568 7570
7569 7571 npip = pip;
7570 7572 pip = NULL;
7571 7573 success = 0;
7572 7574
7573 7575 /* Save the res key */
7574 7576 bcopy(prout->res_key, temp_res_key, MHIOC_RESV_KEY_SIZE);
7575 7577
7576 7578 /*
7577 7579  * Sometimes the CDB from the application can be a Register_And_Ignore.
7578 7580  * Instead of validation, this CDB would result in a forced registration.
7579 7581  * Convert it to a normal CDB for validation.
7580 7582  * After that, be sure to restore the CDB.
7581 7583 */
7582 7584 cdb_1 = vlun->svl_cdb[1];
7583 7585 vlun->svl_cdb[1] &= 0xe0;
7584 7586
7585 7587 do {
7586 7588 osvp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(npip);
7587 7589 if (osvp == NULL) {
7588 7590 VHCI_DEBUG(4, (CE_NOTE, NULL,
7589 7591 "vhci_pgr_validate_and_register: no "
7590 7592 "client priv! 0x%p offlined?\n",
7591 7593 (void *)npip));
7592 7594 goto next_path_1;
7593 7595 }
7594 7596
7595 7597 if (osvp == svp) {
7596 7598 VHCI_DEBUG(4, (CE_NOTE, NULL,
7597 7599 "vhci_pgr_validate_and_register: same svp 0x%p"
7598 7600 " npip 0x%p vlun 0x%p\n",
7599 7601 (void *)svp, (void *)npip, (void *)vlun));
7600 7602 goto next_path_1;
7601 7603 }
7602 7604
7603 7605 VHCI_DEBUG(4, (CE_NOTE, NULL,
7604 7606 "vhci_pgr_validate_and_register: First validate on"
7605 7607 " osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy"
7606 7608 " cdb1 %x\n", (void *)osvp, (void *)vlun,
7607 7609 (void *)curthread, vlun->svl_cdb[1]));
7608 7610 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy:");
7609 7611
7610 7612 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7611 7613
7612 7614 VHCI_DEBUG(4, (CE_WARN, NULL, "vlun 0x%p After bcopy",
7613 7615 (void *)vlun));
7614 7616 vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7615 7617
7616 7618 rval = vhci_do_prout(osvp);
7617 7619 if (rval == 1) {
7618 7620 VHCI_DEBUG(4, (CE_NOTE, NULL,
7619 7621 "%s%d: vhci_pgr_validate_and_register: key"
7620 7622 " validated thread 0x%p\n", ddi_driver_name(cdip),
7621 7623 ddi_get_instance(cdip), (void *)curthread));
7622 7624 pip = npip;
7623 7625 success = 1;
7624 7626 break;
7625 7627 } else {
7626 7628 VHCI_DEBUG(4, (CE_NOTE, NULL,
7627 7629 "vhci_pgr_validate_and_register: First validation"
7628 7630 " on osvp 0x%p failed %x\n", (void *)osvp, rval));
7629 7631 vhci_print_prout_keys(vlun, "v_pgr_val_reg: failed:");
7630 7632 }
7631 7633
7632 7634 /*
7633 7635 * Try other paths
7634 7636 */
7635 7637 next_path_1:
7636 7638 pip = npip;
7637 7639 rval = mdi_select_path(cdip, NULL,
7638 7640 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
7639 7641 pip, &npip);
7640 7642 mdi_rele_path(pip);
7641 7643 } while ((rval == MDI_SUCCESS) && (npip != NULL));
7642 7644
7643 7645
7644 7646 /* Be sure to restore original cdb */
7645 7647 vlun->svl_cdb[1] = cdb_1;
7646 7648
7647 7649 /* Restore the res_key */
7648 7650 bcopy(temp_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7649 7651
7650 7652 /*
7651 7653  * If the key could not be registered on any path (first-time
7652 7654  * registration), return success, as online should still continue.
7653 7655 */
7654 7656 if (success == 0) {
7655 7657 return (1);
7656 7658 }
7657 7659
7658 7660 ASSERT(pip != NULL);
7659 7661
7660 7662 /*
7661 7663 * Force register on new path
7662 7664 */
7663 7665 cdb_1 = vlun->svl_cdb[1]; /* store the cdb */
7664 7666
7665 7667 vlun->svl_cdb[1] &= 0xe0;
7666 7668 vlun->svl_cdb[1] |= VHCI_PROUT_R_AND_IGNORE;
7667 7669
7668 7670 vhci_print_prout_keys(vlun, "v_pgr_val_reg: keys before bcopy: ");
7669 7671
7670 7672 bcopy(prout->active_service_key, prout->service_key,
7671 7673 MHIOC_RESV_KEY_SIZE);
7672 7674 bcopy(prout->active_res_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7673 7675
7674 7676 vhci_print_prout_keys(vlun, "v_pgr_val_reg:keys after bcopy: ");
7675 7677
7676 7678 rval = vhci_do_prout(svp);
7677 7679 vlun->svl_cdb[1] = cdb_1; /* restore the cdb */
7678 7680 if (rval != 1) {
7679 7681 VHCI_DEBUG(4, (CE_NOTE, NULL,
7680 7682 "vhci_pgr_validate_and_register: register on new"
7681 7683 " path 0x%p svp 0x%p failed %x\n",
7682 7684 (void *)pip, (void *)svp, rval));
7683 7685 vhci_print_prout_keys(vlun, "v_pgr_val_reg: reg failed: ");
7684 7686 mdi_rele_path(pip);
7685 7687 return (0);
7686 7688 }
7687 7689
7688 7690 if (bcmp(prout->service_key, zero_key, MHIOC_RESV_KEY_SIZE) == 0) {
7689 7691 VHCI_DEBUG(4, (CE_NOTE, NULL,
7690 7692 "vhci_pgr_validate_and_register: zero service key\n"));
7691 7693 mdi_rele_path(pip);
7692 7694 return (rval);
7693 7695 }
7694 7696
7695 7697 /*
7696 7698 * While the key was force registered, some other host might have
7697 7699 * cleared the key. Re-validate key on another pre-existing path
7698 7700 * before declaring success.
7699 7701 */
7700 7702 npip = pip;
7701 7703 pip = NULL;
7702 7704
7703 7705 /*
7704 7706  * Sometimes the CDB from the application can be a Register_And_Ignore.
7705 7707  * Instead of validation, it would result in a forced registration.
7706 7708  * Convert it to a normal CDB for validation.
7707 7709  * After that, be sure to restore the CDB.
7708 7710 */
7709 7711 cdb_1 = vlun->svl_cdb[1];
7710 7712 vlun->svl_cdb[1] &= 0xe0;
7711 7713 success = 0;
7712 7714
7713 7715 do {
7714 7716 osvp = (scsi_vhci_priv_t *)
7715 7717 mdi_pi_get_vhci_private(npip);
7716 7718 if (osvp == NULL) {
7717 7719 VHCI_DEBUG(4, (CE_NOTE, NULL,
7718 7720 "vhci_pgr_validate_and_register: no "
7719 7721 "client priv! 0x%p offlined?\n",
7720 7722 (void *)npip));
7721 7723 goto next_path_2;
7722 7724 }
7723 7725
7724 7726 if (osvp == svp) {
7725 7727 VHCI_DEBUG(4, (CE_NOTE, NULL,
7726 7728 "vhci_pgr_validate_and_register: same osvp 0x%p"
7727 7729 " npip 0x%p vlun 0x%p\n",
7728 7730 (void *)svp, (void *)npip, (void *)vlun));
7729 7731 goto next_path_2;
7730 7732 }
7731 7733
7732 7734 VHCI_DEBUG(4, (CE_NOTE, NULL,
7733 7735 "vhci_pgr_validate_and_register: Re-validation on"
7734 7736 " osvp 0x%p being done. vlun 0x%p Before bcopy cdb1 %x\n",
7735 7737 (void *)osvp, (void *)vlun, vlun->svl_cdb[1]));
7736 7738 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7737 7739
7738 7740 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7739 7741
7740 7742 vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7741 7743
7742 7744 rval = vhci_do_prout(osvp);
7743 7745 if (rval == 1) {
7744 7746 VHCI_DEBUG(4, (CE_NOTE, NULL,
7745 7747 "%s%d: vhci_pgr_validate_and_register: key"
7746 7748 " validated thread 0x%p\n", ddi_driver_name(cdip),
7747 7749 ddi_get_instance(cdip), (void *)curthread));
7748 7750 pip = npip;
7749 7751 success = 1;
7750 7752 break;
7751 7753 } else {
7752 7754 VHCI_DEBUG(4, (CE_NOTE, NULL,
7753 7755 "vhci_pgr_validate_and_register: Re-validation on"
7754 7756 " osvp 0x%p failed %x\n", (void *)osvp, rval));
7755 7757 vhci_print_prout_keys(vlun,
7756 7758 "v_pgr_val_reg: reval failed: ");
7757 7759 }
7758 7760
7759 7761 /*
7760 7762 * Try other paths
7761 7763 */
7762 7764 next_path_2:
7763 7765 pip = npip;
7764 7766 rval = mdi_select_path(cdip, NULL,
7765 7767 MDI_SELECT_ONLINE_PATH|MDI_SELECT_STANDBY_PATH,
7766 7768 pip, &npip);
7767 7769 mdi_rele_path(pip);
7768 7770 } while ((rval == MDI_SUCCESS) && (npip != NULL));
7769 7771
7770 7772 /* Be sure to restore original cdb */
7771 7773 vlun->svl_cdb[1] = cdb_1;
7772 7774
7773 7775 if (success == 1) {
7774 7776 /* Successfully validated registration */
7775 7777 mdi_rele_path(pip);
7776 7778 return (1);
7777 7779 }
7778 7780
7779 7781 VHCI_DEBUG(4, (CE_WARN, NULL, "key validation failed"));
7780 7782
7781 7783 /*
7782 7784 * key invalid, back out by registering key value of 0
7783 7785 */
7784 7786 VHCI_DEBUG(4, (CE_NOTE, NULL,
7785 7787 "vhci_pgr_validate_and_register: backout on"
7786 7788 " svp 0x%p being done\n", (void *)svp));
7787 7789 vhci_print_prout_keys(vlun, "v_pgr_val_reg: before bcopy: ");
7788 7790
7789 7791 bcopy(prout->service_key, prout->res_key, MHIOC_RESV_KEY_SIZE);
7790 7792 bzero(prout->service_key, MHIOC_RESV_KEY_SIZE);
7791 7793
7792 7794 	vhci_print_prout_keys(vlun, "v_pgr_val_reg: after bcopy: ");
7793 7795
7794 7796 /*
7795 7797 * Get a new path
7796 7798 */
7797 7799 rval = mdi_select_path(cdip, NULL, MDI_SELECT_ONLINE_PATH |
7798 7800 MDI_SELECT_STANDBY_PATH, NULL, &pip);
7799 7801 if ((rval != MDI_SUCCESS) || (pip == NULL)) {
7800 7802 VHCI_DEBUG(4, (CE_NOTE, NULL,
7801 7803 "%s%d: vhci_pgr_validate_and_register: no valid pip\n",
7802 7804 ddi_driver_name(cdip), ddi_get_instance(cdip)));
7803 7805 return (0);
7804 7806 }
7805 7807
7806 7808 if ((rval = vhci_do_prout(svp)) != 1) {
7807 7809 VHCI_DEBUG(4, (CE_NOTE, NULL,
7808 7810 "vhci_pgr_validate_and_register: backout on"
7809 7811 " svp 0x%p failed\n", (void *)svp));
7810 7812 vhci_print_prout_keys(vlun, "backout failed");
7811 7813
7812 7814 VHCI_DEBUG(4, (CE_WARN, NULL,
7813 7815 "%s%d: vhci_pgr_validate_and_register: key"
7814 7816 " validation and backout failed", ddi_driver_name(cdip),
7815 7817 ddi_get_instance(cdip)));
7816 7818 if (rval == VHCI_PGR_ILLEGALOP) {
7817 7819 VHCI_DEBUG(4, (CE_WARN, NULL,
7818 7820 "%s%d: vhci_pgr_validate_and_register: key"
7819 7821 " already cleared", ddi_driver_name(cdip),
7820 7822 ddi_get_instance(cdip)));
7821 7823 rval = 1;
7822 7824 } else
7823 7825 rval = 0;
7824 7826 } else {
7825 7827 VHCI_DEBUG(4, (CE_NOTE, NULL,
7826 7828 "%s%d: vhci_pgr_validate_and_register: key"
7827 7829 " validation failed, key backed out\n",
7828 7830 ddi_driver_name(cdip), ddi_get_instance(cdip)));
7829 7831 vhci_print_prout_keys(vlun, "v_pgr_val_reg: key backed out: ");
7830 7832 }
7831 7833 mdi_rele_path(pip);
7832 7834
7833 7835 return (rval);
7834 7836 }
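
/*
 * Editor's note: the PGR sequence above, flattened into a compilable
 * sketch. prout_sketch() is a hypothetical stand-in for vhci_do_prout()
 * against one path; only the shape (validate on an old path,
 * force-register on the new path, re-validate, back out with a zero
 * key on failure) mirrors vhci_pgr_validate_and_register().
 */
static int
prout_sketch(int path, int backout)
{
	(void) path;
	(void) backout;
	return (1);	/* stub: 1 == success, as vhci_do_prout() returns */
}

static int
pgr_move_key_sketch(int newpath, const int *oldpaths, int nold)
{
	int i, validated = 0;

	for (i = 0; i < nold; i++)	/* 1. validate key on an old path */
		if (prout_sketch(oldpaths[i], 0)) {
			validated = 1;
			break;
		}
	if (!validated)
		return (1);		/* first-time registration: proceed */
	if (!prout_sketch(newpath, 0))	/* 2. force-register on new path */
		return (0);
	for (i = 0; i < nold; i++)	/* 3. re-validate on an old path */
		if (prout_sketch(oldpaths[i], 0))
			return (1);
	/* 4. key invalid: back out by registering a key of zero */
	return (prout_sketch(newpath, 1));
}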
7835 7837
7836 7838 /*
7837 7839 * taskq routine to dispatch a scsi cmd to vhci_scsi_start. This ensures
7838 7840 * that vhci_scsi_start is not called in interrupt context.
7839 7841 * As the upper layer gets TRAN_ACCEPT when the command is dispatched, we
7840 7842 * need to complete the command if something goes wrong.
7841 7843 */
7842 7844 static void
7843 7845 vhci_dispatch_scsi_start(void *arg)
7844 7846 {
7845 7847 struct vhci_pkt *vpkt = (struct vhci_pkt *)arg;
7846 7848 struct scsi_pkt *tpkt = vpkt->vpkt_tgt_pkt;
7847 7849 int rval = TRAN_BUSY;
7848 7850
7849 7851 VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_dispatch_scsi_start: sending"
7850 7852 " scsi-2 reserve for 0x%p\n",
7851 7853 (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7852 7854
7853 7855 /*
7854 7856 	 * To prevent the taskq from being called recursively we set
7855 7857 	 * the VHCI_PKT_THRU_TASKQ bit in the vhci_pkt_states.
7856 7858 */
7857 7859 vpkt->vpkt_state |= VHCI_PKT_THRU_TASKQ;
7858 7860
7859 7861 /*
7860 7862 	 * Wait for the transport to get ready to send packets;
7861 7863 	 * if it times out, it will return something other than
7862 7864 	 * TRAN_BUSY. vhci_reserve_delay may need to be tuned
7863 7865 	 * for other transports and is therefore a global.
7864 7866 	 * Using delay() is safe since this routine is called from
7865 7867 	 * taskq dispatch and not during interrupt context.
7866 7868 */
7867 7869 while ((rval = vhci_scsi_start(&(vpkt->vpkt_tgt_pkt->pkt_address),
7868 7870 vpkt->vpkt_tgt_pkt)) == TRAN_BUSY) {
7869 7871 delay(drv_usectohz(vhci_reserve_delay));
7870 7872 }
7871 7873
7872 7874 switch (rval) {
7873 7875 case TRAN_ACCEPT:
7874 7876 return;
7875 7877
7876 7878 default:
7877 7879 /*
7878 7880 * This pkt shall be retried, and to ensure another taskq
7879 7881 * is dispatched for it, clear the VHCI_PKT_THRU_TASKQ
7880 7882 * flag.
7881 7883 */
7882 7884 vpkt->vpkt_state &= ~VHCI_PKT_THRU_TASKQ;
7883 7885
7884 7886 /* Ensure that the pkt is retried without a reset */
7885 7887 tpkt->pkt_reason = CMD_ABORTED;
7886 7888 tpkt->pkt_statistics |= STAT_ABORTED;
7887 7889 VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_dispatch_scsi_start: "
7888 7890 "TRAN_rval %d returned for dip 0x%p", rval,
7889 7891 (void *)ADDR2DIP(&(vpkt->vpkt_tgt_pkt->pkt_address))));
7890 7892 break;
7891 7893 }
7892 7894
7893 7895 /*
7894 7896 * vpkt_org_vpkt should always be NULL here if the retry command
7895 7897 * has been successfully dispatched. If vpkt_org_vpkt != NULL at
7896 7898 * this point, it is an error so restore the original vpkt and
7897 7899 * return an error to the target driver so it can retry the
7898 7900 * command as appropriate.
7899 7901 */
7900 7902 if (vpkt->vpkt_org_vpkt != NULL) {
7901 7903 struct vhci_pkt *new_vpkt = vpkt;
7902 7904 scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
7903 7905 mdi_pi_get_vhci_private(vpkt->vpkt_path);
7904 7906
7905 7907 vpkt = vpkt->vpkt_org_vpkt;
7906 7908
7907 7909 vpkt->vpkt_tgt_pkt->pkt_reason = tpkt->pkt_reason;
7908 7910 vpkt->vpkt_tgt_pkt->pkt_statistics = tpkt->pkt_statistics;
7909 7911
7910 7912 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
7911 7913 new_vpkt->vpkt_tgt_pkt);
7912 7914
7913 7915 tpkt = vpkt->vpkt_tgt_pkt;
7914 7916 }
7915 7917
7916 7918 scsi_hba_pkt_comp(tpkt);
7917 7919 }
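
/*
 * Editor's note: the busy-retry idiom above as a compilable user-level
 * sketch. try_start_sketch() stands in for vhci_scsi_start() and
 * usleep() for delay(drv_usectohz(...)); the 100000us pause matches
 * the default vhci_reserve_delay. As the comment above notes, this is
 * safe only outside interrupt context.
 */
#include <unistd.h>

#define BUSY_SKETCH		1	/* stand-in for TRAN_BUSY */
#define RESERVE_DELAY_US	100000	/* cf. vhci_reserve_delay */

static int
try_start_sketch(void)
{
	return (0);	/* stub: 0 == accepted (TRAN_ACCEPT) */
}

static int
start_with_backoff_sketch(void)
{
	int rval;

	while ((rval = try_start_sketch()) == BUSY_SKETCH)
		(void) usleep(RESERVE_DELAY_US);
	return (rval);
}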
7918 7920
7919 7921 static void
7920 7922 vhci_initiate_auto_failback(void *arg)
7921 7923 {
7922 7924 struct scsi_vhci_lun *vlun = (struct scsi_vhci_lun *)arg;
7923 7925 dev_info_t *vdip, *cdip;
7924 7926 int held;
7925 7927
7926 7928 cdip = vlun->svl_dip;
7927 7929 vdip = ddi_get_parent(cdip);
7928 7930
7929 7931 VHCI_HOLD_LUN(vlun, VH_SLEEP, held);
7930 7932
7931 7933 /*
7932 7934 	 * Perform a final check to see if the active path class is indeed
7933 7935 	 * not the preferred path class. In the time since the auto failback
7934 7936 	 * was dispatched, an external failover could have been detected.
7935 7937 	 * [Some other host could have detected this condition and triggered
7936 7938 	 * the auto failback already.]
7937 7939 	 * In such a case, if we go ahead with the failover we will be
7938 7940 	 * negating the whole purpose of auto failback.
7939 7941 */
7940 7942 mutex_enter(&vlun->svl_mutex);
7941 7943 if (vlun->svl_active_pclass != NULL) {
7942 7944 char *best_pclass;
7943 7945 struct scsi_failover_ops *fo;
7944 7946
7945 7947 fo = vlun->svl_fops;
7946 7948
7947 7949 (void) fo->sfo_pathclass_next(NULL, &best_pclass,
7948 7950 vlun->svl_fops_ctpriv);
7949 7951 if (strcmp(vlun->svl_active_pclass, best_pclass) == 0) {
7950 7952 mutex_exit(&vlun->svl_mutex);
7951 7953 VHCI_RELEASE_LUN(vlun);
7952 7954 VHCI_DEBUG(1, (CE_NOTE, NULL, "Not initiating "
7953 7955 "auto failback for %s as %s pathclass already "
7954 7956 "active.\n", vlun->svl_lun_wwn, best_pclass));
7955 7957 return;
7956 7958 }
7957 7959 }
7958 7960 mutex_exit(&vlun->svl_mutex);
7959 7961 if (mdi_failover(vdip, vlun->svl_dip, MDI_FAILOVER_SYNC)
7960 7962 == MDI_SUCCESS) {
7961 7963 vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7962 7964 "succeeded for device %s (GUID %s)",
7963 7965 ddi_node_name(cdip), vlun->svl_lun_wwn);
7964 7966 } else {
7965 7967 vhci_log(CE_NOTE, vdip, "!Auto failback operation "
7966 7968 "failed for device %s (GUID %s)",
7967 7969 ddi_node_name(cdip), vlun->svl_lun_wwn);
7968 7970 }
7969 7971 VHCI_RELEASE_LUN(vlun);
7970 7972 }
7971 7973
7972 7974 #ifdef DEBUG
7973 7975 static void
7974 7976 vhci_print_prin_keys(vhci_prin_readkeys_t *prin, int numkeys)
7975 7977 {
7976 7978 vhci_clean_print(NULL, 5, "Current PGR Keys",
7977 7979 (uchar_t *)prin, numkeys * 8);
7978 7980 }
7979 7981 #endif
7980 7982
7981 7983 static void
7982 7984 vhci_print_prout_keys(scsi_vhci_lun_t *vlun, char *msg)
7983 7985 {
7984 7986 int i;
7985 7987 vhci_prout_t *prout;
7986 7988 char buf1[4*MHIOC_RESV_KEY_SIZE + 1];
7987 7989 char buf2[4*MHIOC_RESV_KEY_SIZE + 1];
7988 7990 char buf3[4*MHIOC_RESV_KEY_SIZE + 1];
7989 7991 char buf4[4*MHIOC_RESV_KEY_SIZE + 1];
7990 7992
7991 7993 prout = &vlun->svl_prout;
7992 7994
7993 7995 for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7994 7996 (void) sprintf(&buf1[4*i], "[%02x]", prout->res_key[i]);
7995 7997 for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7996 7998 (void) sprintf(&buf2[(4*i)], "[%02x]", prout->service_key[i]);
7997 7999 for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
7998 8000 (void) sprintf(&buf3[4*i], "[%02x]", prout->active_res_key[i]);
7999 8001 for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
8000 8002 (void) sprintf(&buf4[4*i], "[%02x]",
8001 8003 prout->active_service_key[i]);
8002 8004
8003 8005 	/* Print all in one go; otherwise the output gets jumbled. */
8004 8006 VHCI_DEBUG(5, (CE_CONT, NULL, "%s vlun 0x%p, thread 0x%p\n"
8005 8007 "res_key: : %s\n"
8006 8008 "service_key : %s\n"
8007 8009 "active_res_key : %s\n"
8008 8010 "active_service_key: %s\n",
8009 8011 msg, (void *)vlun, (void *)curthread, buf1, buf2, buf3, buf4));
8010 8012 }
8011 8013
8012 8014 /*
8013 8015 * Called from vhci_scsi_start to update the pHCI pkt with target packet.
8014 8016 */
8015 8017 static void
8016 8018 vhci_update_pHCI_pkt(struct vhci_pkt *vpkt, struct scsi_pkt *pkt)
8017 8019 {
8018 8020
8019 8021 ASSERT(vpkt->vpkt_hba_pkt);
8020 8022
8021 8023 vpkt->vpkt_hba_pkt->pkt_flags = pkt->pkt_flags;
8022 8024 vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOQUEUE;
8023 8025
8024 8026 if ((vpkt->vpkt_hba_pkt->pkt_flags & FLAG_NOINTR) ||
8025 8027 MDI_PI_IS_SUSPENDED(vpkt->vpkt_path)) {
8026 8028 /*
8027 8029 * Polled Command is requested or HBA is in
8028 8030 * suspended state
8029 8031 */
8030 8032 vpkt->vpkt_hba_pkt->pkt_flags |= FLAG_NOINTR;
8031 8033 vpkt->vpkt_hba_pkt->pkt_comp = NULL;
8032 8034 } else {
8033 8035 vpkt->vpkt_hba_pkt->pkt_comp = vhci_intr;
8034 8036 }
8035 8037 vpkt->vpkt_hba_pkt->pkt_time = pkt->pkt_time;
8036 8038 bcopy(pkt->pkt_cdbp, vpkt->vpkt_hba_pkt->pkt_cdbp,
8037 8039 vpkt->vpkt_tgt_init_cdblen);
8038 8040 vpkt->vpkt_hba_pkt->pkt_resid = pkt->pkt_resid;
8039 8041
8040 8042 /* Re-initialize the following pHCI packet state information */
8041 8043 vpkt->vpkt_hba_pkt->pkt_state = 0;
8042 8044 vpkt->vpkt_hba_pkt->pkt_statistics = 0;
8043 8045 vpkt->vpkt_hba_pkt->pkt_reason = 0;
8044 8046 }
8045 8047
8046 8048 static int
8047 8049 vhci_scsi_bus_power(dev_info_t *parent, void *impl_arg, pm_bus_power_op_t op,
8048 8050 void *arg, void *result)
8049 8051 {
8050 8052 int ret = DDI_SUCCESS;
8051 8053
8052 8054 /*
8053 8055 * Generic processing in MPxIO framework
8054 8056 */
8055 8057 ret = mdi_bus_power(parent, impl_arg, op, arg, result);
8056 8058
8057 8059 switch (ret) {
8058 8060 case MDI_SUCCESS:
8059 8061 ret = DDI_SUCCESS;
8060 8062 break;
8061 8063 case MDI_FAILURE:
8062 8064 ret = DDI_FAILURE;
8063 8065 break;
8064 8066 default:
8065 8067 break;
8066 8068 }
8067 8069
8068 8070 return (ret);
8069 8071 }
8070 8072
8071 8073 static int
8072 8074 vhci_pHCI_cap(struct scsi_address *ap, char *cap, int val, int whom,
8073 8075 mdi_pathinfo_t *pip)
8074 8076 {
8075 8077 dev_info_t *cdip;
8076 8078 mdi_pathinfo_t *npip = NULL;
8077 8079 scsi_vhci_priv_t *svp = NULL;
8078 8080 struct scsi_address *pap = NULL;
8079 8081 scsi_hba_tran_t *hba = NULL;
8080 8082 int sps;
8081 8083 int mps_flag;
8082 8084 int rval = 0;
8083 8085
8084 8086 mps_flag = (MDI_SELECT_ONLINE_PATH | MDI_SELECT_STANDBY_PATH);
8085 8087 if (pip) {
8086 8088 /*
8087 8089 * If the call is from vhci_pathinfo_state_change,
8088 8090 * then this path was busy and is becoming ready to accept IO.
8089 8091 */
8090 8092 ASSERT(ap != NULL);
8091 8093 hba = ap->a_hba_tran;
8092 8094 ASSERT(hba != NULL);
8093 8095 rval = scsi_ifsetcap(ap, cap, val, whom);
8094 8096
8095 8097 VHCI_DEBUG(2, (CE_NOTE, NULL,
8096 8098 "!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n",
8097 8099 (void *)pip, (void *)ap, rval));
8098 8100
8099 8101 return (rval);
8100 8102 }
8101 8103
8102 8104 /*
8103 8105 * Set capability on all the pHCIs.
8104 8106 * If any path is busy, then the capability would be set by
8105 8107 * vhci_pathinfo_state_change.
8106 8108 */
8107 8109
8108 8110 cdip = ADDR2DIP(ap);
8109 8111 ASSERT(cdip != NULL);
8110 8112 sps = mdi_select_path(cdip, NULL, mps_flag, NULL, &pip);
8111 8113 if ((sps != MDI_SUCCESS) || (pip == NULL)) {
8112 8114 VHCI_DEBUG(2, (CE_WARN, NULL,
8113 8115 "!vhci_pHCI_cap: Unable to get a path, dip 0x%p",
8114 8116 (void *)cdip));
8115 8117 return (0);
8116 8118 }
8117 8119
8118 8120 again:
8119 8121 svp = (scsi_vhci_priv_t *)mdi_pi_get_vhci_private(pip);
8120 8122 if (svp == NULL) {
8121 8123 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
8122 8124 "priv is NULL, pip 0x%p", (void *)pip));
8123 8125 mdi_rele_path(pip);
8124 8126 return (rval);
8125 8127 }
8126 8128
8127 8129 if (svp->svp_psd == NULL) {
8128 8130 VHCI_DEBUG(2, (CE_WARN, NULL, "!vhci_pHCI_cap: "
8129 8131 "psd is NULL, pip 0x%p, svp 0x%p",
8130 8132 (void *)pip, (void *)svp));
8131 8133 mdi_rele_path(pip);
8132 8134 return (rval);
8133 8135 }
8134 8136
8135 8137 pap = &svp->svp_psd->sd_address;
8136 8138 ASSERT(pap != NULL);
8137 8139 hba = pap->a_hba_tran;
8138 8140 ASSERT(hba != NULL);
8139 8141
8140 8142 if (hba->tran_setcap != NULL) {
8141 8143 rval = scsi_ifsetcap(pap, cap, val, whom);
8142 8144
8143 8145 VHCI_DEBUG(2, (CE_NOTE, NULL,
8144 8146 "!vhci_pHCI_cap: path %p, ap %p, rval %x\n",
8145 8147 (void *)pip, (void *)ap, rval));
8146 8148
8147 8149 /*
8148 8150 * Select next path and issue the setcap, repeat
8149 8151 * until all paths are exhausted
8150 8152 */
8151 8153 sps = mdi_select_path(cdip, NULL, mps_flag, pip, &npip);
8152 8154 if ((sps != MDI_SUCCESS) || (npip == NULL)) {
8153 8155 mdi_rele_path(pip);
8154 8156 return (1);
8155 8157 }
8156 8158 mdi_rele_path(pip);
8157 8159 pip = npip;
8158 8160 goto again;
8159 8161 }
8160 8162 mdi_rele_path(pip);
8161 8163 return (rval);
8162 8164 }
8163 8165
8164 8166 static int
8165 8167 vhci_scsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
8166 8168 void *arg, dev_info_t **child)
8167 8169 {
8168 8170 char *guid;
8169 8171
8170 8172 if (vhci_bus_config_debug)
8171 8173 flags |= NDI_DEVI_DEBUG;
8172 8174
8173 8175 if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ONE)
8174 8176 guid = vhci_devnm_to_guid((char *)arg);
8175 8177 else
8176 8178 guid = NULL;
8177 8179
8178 8180 if (mdi_vhci_bus_config(pdip, flags, op, arg, child, guid)
8179 8181 == MDI_SUCCESS)
8180 8182 return (NDI_SUCCESS);
8181 8183 else
8182 8184 return (NDI_FAILURE);
8183 8185 }
8184 8186
8185 8187 static int
8186 8188 vhci_scsi_bus_unconfig(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
8187 8189 void *arg)
8188 8190 {
8189 8191 if (vhci_bus_config_debug)
8190 8192 flags |= NDI_DEVI_DEBUG;
8191 8193
8192 8194 return (ndi_busop_bus_unconfig(pdip, flags, op, arg));
8193 8195 }
8194 8196
8195 8197 /*
8196 8198 * Take the original vhci_pkt, create a duplicate of the pkt for resending
8197 8199 * as though it originated in ssd.
8198 8200 */
8199 8201 static struct scsi_pkt *
8200 8202 vhci_create_retry_pkt(struct vhci_pkt *vpkt)
8201 8203 {
8202 8204 struct vhci_pkt *new_vpkt = NULL;
8203 8205 struct scsi_pkt *pkt = NULL;
8204 8206
8205 8207 scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
8206 8208 mdi_pi_get_vhci_private(vpkt->vpkt_path);
8207 8209
8208 8210 /*
8209 8211 * Ensure consistent data at completion time by setting PKT_CONSISTENT
8210 8212 */
8211 8213 pkt = vhci_scsi_init_pkt(&svp->svp_psd->sd_address, pkt,
8212 8214 vpkt->vpkt_tgt_init_bp, vpkt->vpkt_tgt_init_cdblen,
8213 8215 vpkt->vpkt_tgt_init_scblen, 0, PKT_CONSISTENT, NULL_FUNC, NULL);
8214 8216 if (pkt != NULL) {
8215 8217 new_vpkt = TGTPKT2VHCIPKT(pkt);
8216 8218
8217 8219 pkt->pkt_address = vpkt->vpkt_tgt_pkt->pkt_address;
8218 8220 pkt->pkt_flags = vpkt->vpkt_tgt_pkt->pkt_flags;
8219 8221 pkt->pkt_time = vpkt->vpkt_tgt_pkt->pkt_time;
8220 8222 pkt->pkt_comp = vpkt->vpkt_tgt_pkt->pkt_comp;
8221 8223
8222 8224 pkt->pkt_resid = 0;
8223 8225 pkt->pkt_statistics = 0;
8224 8226 pkt->pkt_reason = 0;
8225 8227
8226 8228 bcopy(vpkt->vpkt_tgt_pkt->pkt_cdbp,
8227 8229 pkt->pkt_cdbp, vpkt->vpkt_tgt_init_cdblen);
8228 8230
8229 8231 /*
8230 8232 * Save a pointer to the original vhci_pkt
8231 8233 */
8232 8234 new_vpkt->vpkt_org_vpkt = vpkt;
8233 8235 }
8234 8236
8235 8237 return (pkt);
8236 8238 }
8237 8239
8238 8240 /*
8239 8241 * Copy the successful completion information from the hba packet into
8240 8242 * the original target pkt from the upper layer. Returns the original
8241 8243 * vpkt and destroys the new vpkt from the internal retry.
8242 8244 */
8243 8245 static struct vhci_pkt *
8244 8246 vhci_sync_retry_pkt(struct vhci_pkt *vpkt)
8245 8247 {
8246 8248 struct vhci_pkt *ret_vpkt = NULL;
8247 8249 struct scsi_pkt *tpkt = NULL;
8248 8250 struct scsi_pkt *hba_pkt = NULL;
8249 8251 scsi_vhci_priv_t *svp = (scsi_vhci_priv_t *)
8250 8252 mdi_pi_get_vhci_private(vpkt->vpkt_path);
8251 8253
8252 8254 ASSERT(vpkt->vpkt_org_vpkt != NULL);
8253 8255 VHCI_DEBUG(0, (CE_NOTE, NULL, "vhci_sync_retry_pkt: Retry pkt "
8254 8256 "completed successfully!\n"));
8255 8257
8256 8258 ret_vpkt = vpkt->vpkt_org_vpkt;
8257 8259 tpkt = ret_vpkt->vpkt_tgt_pkt;
8258 8260 hba_pkt = vpkt->vpkt_hba_pkt;
8259 8261
8260 8262 /*
8261 8263 * Copy the good status into the target driver's packet
8262 8264 */
8263 8265 *(tpkt->pkt_scbp) = *(hba_pkt->pkt_scbp);
8264 8266 tpkt->pkt_resid = hba_pkt->pkt_resid;
8265 8267 tpkt->pkt_state = hba_pkt->pkt_state;
8266 8268 tpkt->pkt_statistics = hba_pkt->pkt_statistics;
8267 8269 tpkt->pkt_reason = hba_pkt->pkt_reason;
8268 8270
8269 8271 /*
8270 8272 * Destroy the internally created vpkt for the retry
8271 8273 */
8272 8274 vhci_scsi_destroy_pkt(&svp->svp_psd->sd_address,
8273 8275 vpkt->vpkt_tgt_pkt);
8274 8276
8275 8277 return (ret_vpkt);
8276 8278 }
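
Taken together, vhci_create_retry_pkt() and vhci_sync_retry_pkt() bracket one
internal retry cycle. A minimal sketch of the intended flow -- illustrative
only; in the driver itself the retry is driven from the completion path, and
the helper below is hypothetical:

	static void
	retry_cycle_sketch(struct vhci_pkt *vpkt)
	{
		struct scsi_pkt *rpkt;

		/* Clone the original target packet for an internal reissue. */
		rpkt = vhci_create_retry_pkt(vpkt);
		if (rpkt == NULL)
			return;	/* no retry; complete the original as-is */

		/* ... transport rpkt; if it then completes successfully ... */

		/* Fold the good status back and destroy the clone. */
		vpkt = vhci_sync_retry_pkt(TGTPKT2VHCIPKT(rpkt));
		scsi_hba_pkt_comp(vpkt->vpkt_tgt_pkt);
	}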
8277 8279
8278 8280 /* restart the REQUEST SENSE command */
8279 8281 static void
8280 8282 vhci_uscsi_restart_sense(void *arg)
8281 8283 {
8282 8284 struct buf *rqbp;
8283 8285 struct buf *bp;
8284 8286 struct scsi_pkt *rqpkt = (struct scsi_pkt *)arg;
8285 8287 mp_uscsi_cmd_t *mp_uscmdp;
8286 8288
8287 8289 VHCI_DEBUG(4, (CE_WARN, NULL,
8288 8290 "vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt));
8289 8291
8290 8292 if (scsi_transport(rqpkt) != TRAN_ACCEPT) {
8291 8293 /* if it fails, we need to wake up the original command */
8292 8294 mp_uscmdp = rqpkt->pkt_private;
8293 8295 ASSERT(mp_uscmdp != NULL);	/* check before the dereferences below */
8294 8296 bp = mp_uscmdp->cmdbp;
8295 8297 rqbp = mp_uscmdp->rqbp;
8296 8298 scsi_free_consistent_buf(rqbp);
8297 8299 scsi_destroy_pkt(rqpkt);
8298 8300 bp->b_resid = bp->b_bcount;
8299 8301 bioerror(bp, EIO);
8300 8302 biodone(bp);
8301 8303 }
8302 8304 }
8303 8305
8304 8306 /*
8305 8307 * auto-rqsense is not enabled so we have to retrieve the request sense
8306 8308 * manually.
8307 8309 */
8308 8310 static int
8309 8311 vhci_uscsi_send_sense(struct scsi_pkt *pkt, mp_uscsi_cmd_t *mp_uscmdp)
8310 8312 {
8311 8313 struct buf *rqbp, *cmdbp;
8312 8314 struct scsi_pkt *rqpkt;
8313 8315 int rval = 0;
8314 8316
8315 8317 cmdbp = mp_uscmdp->cmdbp;
8316 8318 ASSERT(cmdbp != NULL);
8317 8319
8318 8320 VHCI_DEBUG(4, (CE_WARN, NULL,
8319 8321 "vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p",
8320 8322 (void *)cmdbp, (void *)pkt, (void *)mp_uscmdp));
8321 8323 /* set up the packet information and cdb */
8322 8324 if ((rqbp = scsi_alloc_consistent_buf(mp_uscmdp->ap, NULL,
8323 8325 SENSE_LENGTH, B_READ, NULL, NULL)) == NULL) {
8324 8326 return (-1);
8325 8327 }
8326 8328
8327 8329 if ((rqpkt = scsi_init_pkt(mp_uscmdp->ap, NULL, rqbp,
8328 8330 CDB_GROUP0, 1, 0, PKT_CONSISTENT, NULL, NULL)) == NULL) {
8329 8331 scsi_free_consistent_buf(rqbp);
8330 8332 return (-1);
8331 8333 }
8332 8334
8333 8335 (void) scsi_setup_cdb((union scsi_cdb *)(intptr_t)rqpkt->pkt_cdbp,
8334 8336 SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
8335 8337
8336 8338 mp_uscmdp->rqbp = rqbp;
8337 8339 rqbp->b_private = mp_uscmdp;
8338 8340 rqpkt->pkt_flags |= FLAG_SENSING;
8339 8341 rqpkt->pkt_time = 60;
8340 8342 rqpkt->pkt_comp = vhci_uscsi_iodone;
8341 8343 rqpkt->pkt_private = mp_uscmdp;
8342 8344
8343 8345 /*
8344 8346 * NOTE: This code path is related to MPAPI uscsi(7I), so path
8345 8347 * selection is not based on path_instance.
8346 8348 */
8347 8349 if (scsi_pkt_allocated_correctly(rqpkt))
8348 8350 rqpkt->pkt_path_instance = 0;
8349 8351
8350 8352 /* transport the request sense packet */
8351 8353 switch (scsi_transport(rqpkt)) {
8352 8354 case TRAN_ACCEPT:
8353 8355 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8354 8356 "transport accepted."));
8355 8357 break;
8356 8358 case TRAN_BUSY:
8357 8359 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8358 8360 "transport busy, setting timeout."));
8359 8361 vhci_restart_timeid = timeout(vhci_uscsi_restart_sense, rqpkt,
8360 8362 (drv_usectohz(5 * 1000000)));
8361 8363 break;
8362 8364 default:
8363 8365 VHCI_DEBUG(1, (CE_NOTE, NULL, "vhci_uscsi_send_sense: "
8364 8366 "transport failed"));
8365 8367 scsi_free_consistent_buf(rqbp);
8366 8368 scsi_destroy_pkt(rqpkt);
8367 8369 rval = -1;
8368 8370 }
8369 8371
8370 8372 return (rval);
8371 8373 }
8372 8374
8373 8375 /*
8374 8376 * done routine for the mpapi uscsi command - this behaves as though
8375 8377 * FLAG_DIAGNOSE is set, meaning there are no retries except for a manual
8376 8378 * request sense.
8377 8379 */
8378 8380 void
8379 8381 vhci_uscsi_iodone(struct scsi_pkt *pkt)
8380 8382 {
8381 8383 struct buf *bp;
8382 8384 mp_uscsi_cmd_t *mp_uscmdp;
8383 8385 struct uscsi_cmd *uscmdp;
8384 8386 struct scsi_arq_status *arqstat;
8385 8387 int err;
8386 8388
8387 8389 mp_uscmdp = (mp_uscsi_cmd_t *)pkt->pkt_private;
8388 8390 uscmdp = mp_uscmdp->uscmdp;
8389 8391 bp = mp_uscmdp->cmdbp;
8390 8392 ASSERT(bp != NULL);
8391 8393 VHCI_DEBUG(4, (CE_WARN, NULL,
8392 8394 "vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p",
8393 8395 (void *)bp, (void *)pkt, (void *)mp_uscmdp));
8394 8396 /* Save the status and the residual into the uscsi_cmd struct */
8395 8397 uscmdp->uscsi_status = ((*(pkt)->pkt_scbp) & STATUS_MASK);
8396 8398 uscmdp->uscsi_resid = bp->b_resid;
8397 8399
8398 8400 /* return early on a fully successful command */
8399 8401 if (pkt->pkt_reason == CMD_CMPLT &&
8400 8402 SCBP_C(pkt) == 0 && ((pkt->pkt_flags & FLAG_SENSING) == 0) &&
8401 8403 pkt->pkt_resid == 0) {
8402 8404 mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8403 8405 scsi_destroy_pkt(pkt);
8404 8406 biodone(bp);
8405 8407 return;
8406 8408 }
8407 8409 VHCI_DEBUG(4, (CE_NOTE, NULL, "iodone: reason=0x%x "
8408 8410 " pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld",
8409 8411 pkt->pkt_reason, pkt->pkt_resid,
8410 8412 pkt->pkt_state, bp->b_bcount, bp->b_resid));
8411 8413
8412 8414 err = EIO;
8413 8415
8414 8416 arqstat = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
8415 8417 if (pkt->pkt_reason != CMD_CMPLT) {
8416 8418 /*
8417 8419 * The command did not complete.
8418 8420 */
8419 8421 VHCI_DEBUG(4, (CE_NOTE, NULL,
8420 8422 "vhci_uscsi_iodone: command did not complete."
8421 8423 " reason: %x flag: %x", pkt->pkt_reason, pkt->pkt_flags));
8422 8424 if (pkt->pkt_flags & FLAG_SENSING) {
8423 8425 MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8424 8426 } else if (pkt->pkt_reason == CMD_TIMEOUT) {
8425 8427 MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_HARDERR);
8426 8428 err = ETIMEDOUT;
8427 8429 }
8428 8430 } else if (pkt->pkt_state & STATE_ARQ_DONE && mp_uscmdp->arq_enabled) {
8429 8431 /*
8430 8432 * The auto-rqsense happened, and the packet has a filled-in
8431 8433 * scsi_arq_status structure, pointed to by pkt_scbp.
8432 8434 */
8433 8435 VHCI_DEBUG(4, (CE_NOTE, NULL,
8434 8436 "vhci_uscsi_iodone: received auto-requested sense"));
8435 8437 if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8436 8438 /* get the amount of data to copy into rqbuf */
8437 8439 int rqlen = SENSE_LENGTH - arqstat->sts_rqpkt_resid;
8438 8440 rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8439 8441 uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
8440 8442 uscmdp->uscsi_rqstatus =
8441 8443 *((char *)&arqstat->sts_rqpkt_status);
8442 8444 if (uscmdp->uscsi_rqbuf && uscmdp->uscsi_rqlen &&
8443 8445 rqlen != 0) {
8444 8446 bcopy(&(arqstat->sts_sensedata),
8445 8447 uscmdp->uscsi_rqbuf, rqlen);
8446 8448 }
8447 8449 mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8448 8450 VHCI_DEBUG(4, (CE_NOTE, NULL,
8449 8451 "vhci_uscsi_iodone: ARQ "
8450 8452 "uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d "
8451 8453 "xfer: %d rqpkt_resid: %d\n",
8452 8454 uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid,
8453 8455 uscmdp->uscsi_rqlen, rqlen,
8454 8456 arqstat->sts_rqpkt_resid));
8455 8457 }
8456 8458 } else if (pkt->pkt_flags & FLAG_SENSING) {
8457 8459 struct buf *rqbp;
8458 8460 struct scsi_status *rqstatus;
8459 8461
8460 8462 rqstatus = (struct scsi_status *)pkt->pkt_scbp;
8461 8463 /* a manual request sense was done - get the information */
8462 8464 if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8463 8465 int rqlen = SENSE_LENGTH - pkt->pkt_resid;
8464 8466
8465 8467 rqbp = mp_uscmdp->rqbp;
8466 8468 /* get the amount of data to copy into rqbuf */
8467 8469 rqlen = min(((int)uscmdp->uscsi_rqlen), rqlen);
8468 8470 uscmdp->uscsi_rqresid = uscmdp->uscsi_rqlen - rqlen;
8469 8471 uscmdp->uscsi_rqstatus = *((char *)rqstatus);
8470 8472 if (uscmdp->uscsi_rqlen && uscmdp->uscsi_rqbuf) {
8471 8473 bcopy(rqbp->b_un.b_addr, uscmdp->uscsi_rqbuf,
8472 8474 rqlen);
8473 8475 }
8474 8476 MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8475 8477 scsi_free_consistent_buf(rqbp);
8476 8478 }
8477 8479 VHCI_DEBUG(4, (CE_NOTE, NULL, "vhci_uscsi_iodone: FLAG_SENSING"
8478 8480 "uscsi_rqstatus=0x%x uscsi_rqresid=%d\n",
8479 8481 uscmdp->uscsi_rqstatus, uscmdp->uscsi_rqresid));
8480 8482 } else {
8481 8483 struct scsi_status *status =
8482 8484 (struct scsi_status *)pkt->pkt_scbp;
8483 8485 /*
8484 8486 * Command completed and we're not getting sense. Check for
8485 8487 * errors and decide what to do next.
8486 8488 */
8487 8489 VHCI_DEBUG(4, (CE_NOTE, NULL,
8488 8490 "vhci_uscsi_iodone: command appears complete: reason: %x",
8489 8491 pkt->pkt_reason));
8490 8492 if (status->sts_chk) {
8491 8493 /* need to manually get the request sense */
8492 8494 if (vhci_uscsi_send_sense(pkt, mp_uscmdp) == 0) {
8493 8495 scsi_destroy_pkt(pkt);
8494 8496 return;
8495 8497 }
8496 8498 } else {
8497 8499 VHCI_DEBUG(4, (CE_NOTE, NULL,
8498 8500 "vhci_chk_err: appears complete"));
8499 8501 err = 0;
8500 8502 mdi_pi_kstat_iosupdate(mp_uscmdp->pip, bp);
8501 8503 if (pkt->pkt_resid) {
8502 8504 bp->b_resid += pkt->pkt_resid;
8503 8505 }
8504 8506 }
8505 8507 }
8506 8508
8507 8509 if (err) {
8508 8510 if (bp->b_resid == 0)
8509 8511 bp->b_resid = bp->b_bcount;
8510 8512 bioerror(bp, err);
8511 8513 bp->b_flags |= B_ERROR;
8512 8514 }
8513 8515
8514 8516 scsi_destroy_pkt(pkt);
8515 8517 biodone(bp);
8516 8518
8517 8519 VHCI_DEBUG(4, (CE_WARN, NULL, "vhci_uscsi_iodone: exit"));
8518 8520 }
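
To make the sense-residual arithmetic in the ARQ branch above concrete, a
worked example with assumed values (SENSE_LENGTH of 20, an ARQ packet
residual of 6, and a caller-supplied rqbuf of 10 bytes):

	/*
	 * rqlen   = SENSE_LENGTH - sts_rqpkt_resid = 20 - 6      = 14 valid
	 * rqlen   = min(uscsi_rqlen, rqlen)        = min(10, 14) = 10 copied
	 * rqresid = uscsi_rqlen - rqlen            = 10 - 10     = 0 left over
	 */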
8519 8521
8520 8522 /*
8521 8523 * start routine for the mpapi uscsi command
8522 8524 */
8523 8525 int
8524 8526 vhci_uscsi_iostart(struct buf *bp)
8525 8527 {
8526 8528 struct scsi_pkt *pkt;
8527 8529 struct uscsi_cmd *uscmdp;
8528 8530 mp_uscsi_cmd_t *mp_uscmdp;
8529 8531 int stat_size, rval;
8530 8532 int retry = 0;
8531 8533
8532 8534 ASSERT(bp->b_private != NULL);
8533 8535
8534 8536 mp_uscmdp = (mp_uscsi_cmd_t *)bp->b_private;
8535 8537 uscmdp = mp_uscmdp->uscmdp;
8536 8538 if (uscmdp->uscsi_flags & USCSI_RQENABLE) {
8537 8539 stat_size = SENSE_LENGTH;
8538 8540 } else {
8539 8541 stat_size = 1;
8540 8542 }
8541 8543
8542 8544 pkt = scsi_init_pkt(mp_uscmdp->ap, NULL, bp, uscmdp->uscsi_cdblen,
8543 8545 stat_size, 0, 0, SLEEP_FUNC, NULL);
8544 8546 if (pkt == NULL) {
8545 8547 VHCI_DEBUG(4, (CE_NOTE, NULL,
8546 8548 "vhci_uscsi_iostart: rval: EINVAL"));
8547 8549 bp->b_resid = bp->b_bcount;
8548 8550 uscmdp->uscsi_resid = bp->b_bcount;
8549 8551 bioerror(bp, EINVAL);
8550 8552 biodone(bp);
8551 8553 return (EINVAL);
8552 8554 }
8553 8555
8554 8556 pkt->pkt_time = uscmdp->uscsi_timeout;
8555 8557 bcopy(uscmdp->uscsi_cdb, pkt->pkt_cdbp, (size_t)uscmdp->uscsi_cdblen);
8556 8558 pkt->pkt_comp = vhci_uscsi_iodone;
8557 8559 pkt->pkt_private = mp_uscmdp;
8558 8560 if (uscmdp->uscsi_flags & USCSI_SILENT)
8559 8561 pkt->pkt_flags |= FLAG_SILENT;
8560 8562 if (uscmdp->uscsi_flags & USCSI_ISOLATE)
8561 8563 pkt->pkt_flags |= FLAG_ISOLATE;
8562 8564 if (uscmdp->uscsi_flags & USCSI_DIAGNOSE)
8563 8565 pkt->pkt_flags |= FLAG_DIAGNOSE;
8564 8566 if (uscmdp->uscsi_flags & USCSI_RENEGOT) {
8565 8567 pkt->pkt_flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
8566 8568 }
8567 8569 VHCI_DEBUG(4, (CE_WARN, NULL,
8568 8570 "vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p"
8569 8571 " ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p"
8570 8572 " stat_size: %d",
8571 8573 (void *)mp_uscmdp->ap, (void *)pkt, (void *)pkt->pkt_cdbp,
8572 8574 (void *)uscmdp, (void *)uscmdp->uscsi_cdb, pkt->pkt_cdblen,
8573 8575 (void *)bp, bp->b_bcount, (void *)mp_uscmdp->pip, stat_size));
8574 8576
8575 8577 /*
8576 8578 * NOTE: This code path is related to MPAPI uscsi(7I), so path
8577 8579 * selection is not based on path_instance.
8578 8580 */
8579 8581 if (scsi_pkt_allocated_correctly(pkt))
8580 8582 pkt->pkt_path_instance = 0;
8581 8583
8582 8584 while (((rval = scsi_transport(pkt)) == TRAN_BUSY) &&
8583 8585 retry < vhci_uscsi_retry_count) {
8584 8586 delay(drv_usectohz(vhci_uscsi_delay));
8585 8587 retry++;
8586 8588 }
8587 8589 if (retry >= vhci_uscsi_retry_count) {
8588 8590 VHCI_DEBUG(4, (CE_NOTE, NULL,
8589 8591 "vhci_uscsi_iostart: tran_busy - retry: %d", retry));
8590 8592 }
8591 8593 switch (rval) {
8592 8594 case TRAN_ACCEPT:
8593 8595 rval = 0;
8594 8596 break;
8595 8597
8596 8598 default:
8597 8599 VHCI_DEBUG(4, (CE_NOTE, NULL,
8598 8600 "vhci_uscsi_iostart: rval: %d count: %ld res: %ld",
8599 8601 rval, bp->b_bcount, bp->b_resid));
8600 8602 bp->b_resid = bp->b_bcount;
8601 8603 uscmdp->uscsi_resid = bp->b_bcount;
8602 8604 bioerror(bp, EIO);
8603 8605 scsi_destroy_pkt(pkt);
8604 8606 biodone(bp);
8605 8607 rval = EIO;
8606 8608 MDI_PI_ERRSTAT(mp_uscmdp->pip, MDI_PI_TRANSERR);
8607 8609 break;
8608 8610 }
8609 8611 VHCI_DEBUG(4, (CE_NOTE, NULL,
8610 8612 "vhci_uscsi_iostart: exit: rval: %d", rval));
8611 8613 return (rval);
8612 8614 }
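
For scale: with the driver's default tunables (vhci_uscsi_delay of 100000
microseconds, vhci_uscsi_retry_count of 180), the TRAN_BUSY loop above works
out to:

	/* 180 retries * 100000 us/retry = 18,000,000 us, i.e. ~18 s worst case */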
8613 8615
8614 8616 /* ARGSUSED */
8615 8617 static struct scsi_failover_ops *
8616 8618 vhci_dev_fo(dev_info_t *vdip, struct scsi_device *psd,
8617 8619 void **ctprivp, char **fo_namep)
8618 8620 {
8619 8621 struct scsi_failover_ops *sfo;
8620 8622 char *sfo_name;
8621 8623 char *override;
8622 8624 struct scsi_failover *sf;
8623 8625
8624 8626 ASSERT(psd && psd->sd_inq);
8625 8627 if ((psd == NULL) || (psd->sd_inq == NULL)) {
8626 8628 VHCI_DEBUG(1, (CE_NOTE, NULL,
8627 8629 "!vhci_dev_fo:return NULL no scsi_device or inquiry"));
8628 8630 return (NULL);
8629 8631 }
8630 8632
8631 8633 /*
8632 8634 * Determine if device is supported under scsi_vhci, and select
8633 8635 * failover module.
8634 8636 *
8635 8637 * See if there is a scsi_vhci.conf file override for this device's
8636 8638 * VID/PID. The following values can be returned:
8637 8639 *
8638 8640 * NULL If NULL is returned, there is no scsi_vhci.conf
8639 8641 * override. For NULL, we determine the failover_ops for
8640 8642 * this device by checking the sfo_device_probe entry
8641 8643 * point for each 'fops' module, in order.
8642 8644 *
8643 8645 * NOTE: Correct operation may depend on module ordering
8644 8646 * of 'specific' (failover modules that are completely
8645 8647 * VID/PID table based) to 'generic' (failover modules
8646 8648 * that are based on T10 standards like TPGS). Currently,
8647 8649 * the value of 'ddi-forceload' in scsi_vhci.conf is used
8648 8650 * to establish the module list and probe order.
8649 8651 *
8650 8652 * "NONE" If value "NONE" is returned then there is a
8651 8653 * scsi_vhci.conf VID/PID override to indicate the device
8652 8654 * should not be supported under scsi_vhci (even if there
8653 8655 * is an 'fops' module supporting the device).
8654 8656 *
8655 8657 * "<other>" If another value is returned then that value is the
8656 8658 * name of the 'fops' module that should be used.
8657 8659 */
8658 8660 sfo = NULL; /* "NONE" */
8659 8661 override = scsi_get_device_type_string(
8660 8662 "scsi-vhci-failover-override", vdip, psd);
8661 8663 if (override == NULL) {
8662 8664 /* NULL: default: select based on sfo_device_probe results */
8663 8665 for (sf = scsi_failover_table; sf->sf_mod; sf++) {
8664 8666 if ((sf->sf_sfo == NULL) ||
8665 8667 sf->sf_sfo->sfo_device_probe(psd, psd->sd_inq,
8666 8668 ctprivp) == SFO_DEVICE_PROBE_PHCI)
8667 8669 continue;
8668 8670
8669 8671 /* found failover module, supported under scsi_vhci */
8670 8672 sfo = sf->sf_sfo;
8671 8673 if (fo_namep && (*fo_namep == NULL)) {
8672 8674 sfo_name = i_ddi_strdup(sfo->sfo_name,
8673 8675 KM_SLEEP);
8674 8676 *fo_namep = sfo_name;
8675 8677 }
8676 8678 break;
8677 8679 }
8678 8680 } else if (strcasecmp(override, "NONE")) {
8679 8681 /* !"NONE": select based on driver.conf specified name */
8680 8682 for (sf = scsi_failover_table, sfo = NULL; sf->sf_mod; sf++) {
8681 8683 if ((sf->sf_sfo == NULL) ||
8682 8684 (sf->sf_sfo->sfo_name == NULL) ||
8683 8685 strcmp(override, sf->sf_sfo->sfo_name))
8684 8686 continue;
8685 8687
8686 8688 /*
8687 8689 * NOTE: If sfo_device_probe() has side-effects,
8688 8690 * including setting *ctprivp, those side-effects
8689 8691 * do not occur when an override is configured.
8690 8692 */
8691 8693
8692 8694 /* found failover module, supported under scsi_vhci */
8693 8695 sfo = sf->sf_sfo;
8694 8696 if (fo_namep && (*fo_namep == NULL)) {
8695 8697 sfo_name = kmem_alloc(strlen("conf ") +
8696 8698 strlen(sfo->sfo_name) + 1, KM_SLEEP);
8697 8699 (void) sprintf(sfo_name, "conf %s",
8698 8700 sfo->sfo_name);
8699 8701 *fo_namep = sfo_name;
8700 8702 }
8701 8703 break;
8702 8704 }
8703 8705 }
8704 8706 if (override)
8705 8707 kmem_free(override, strlen(override) + 1);
8706 8708 return (sfo);
8707 8709 }
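
To illustrate the override semantics described above, a hypothetical
scsi_vhci.conf fragment -- the VID/PID strings are invented; "f_sym" is the
symmetric failover module, and the pairwise list format follows the comment
above:

	# Hypothetical entries: force the symmetric module for one device,
	# and exclude another from scsi_vhci entirely.
	scsi-vhci-failover-override =
		"ACME    SUPERDISK",	"f_sym",
		"ACME    JBOD",		"NONE";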
8708 8710
8709 8711 /*
8710 8712 * Determine whether the device described by cinfo should be enumerated
8711 8713 * under the vHCI or the pHCI - if there are failover ops then the device
8712 8714 * is supported under the vHCI. By agreement with SCSA, cinfo is a pointer
8713 8715 * to a scsi_device structure associated with a decorated pHCI probe node.
8714 8716 */
8715 8717 /* ARGSUSED */
8716 8718 int
8717 8719 vhci_is_dev_supported(dev_info_t *vdip, dev_info_t *pdip, void *cinfo)
8718 8720 {
8719 8721 struct scsi_device *psd = (struct scsi_device *)cinfo;
8720 8722
8721 8723 return (vhci_dev_fo(vdip, psd, NULL, NULL) ? MDI_SUCCESS : MDI_FAILURE);
8722 8724 }
8723 8725
8724 8726
8725 8727 #ifdef DEBUG
8726 8728 extern struct scsi_key_strings scsi_cmds[];
8727 8729
8728 8730 static char *
8729 8731 vhci_print_scsi_cmd(char cmd)
8730 8732 {
8731 8733 char tmp[64];
8732 8734 char *cpnt;
8733 8735
8734 8736 cpnt = scsi_cmd_name(cmd, scsi_cmds, tmp);
8735 8737 /* if cpnt points into tmp, it would dangle on return; use a static string */
8736 8738 if (cpnt == tmp) {
8737 8739 cpnt = "Unknown Command";
8738 8740 }
8739 8741 return (cpnt);
8740 8742 }
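
The check above exists because scsi_cmd_name() may format an unknown opcode
into the caller-supplied scratch buffer and return a pointer to it. A
hypothetical broken variant shows the hazard being avoided:

	static char *
	broken_print_scsi_cmd(char cmd)		/* hypothetical -- do not use */
	{
		char tmp[64];

		/* BUG: may return tmp, a pointer into this stack frame... */
		return (scsi_cmd_name(cmd, scsi_cmds, tmp));
		/* ...whose storage is dead once the function returns. */
	}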
8741 8743
8742 8744 extern uchar_t scsi_cdb_size[];
8743 8745
8744 8746 static void
8745 8747 vhci_print_cdb(dev_info_t *dip, uint_t level, char *title, uchar_t *cdb)
8746 8748 {
8747 8749 int len = scsi_cdb_size[CDB_GROUPID(cdb[0])];
8748 8750 char buf[256];
8749 8751
8750 8752 if (level == CE_NOTE) {
8751 8753 vhci_log(level, dip, "path cmd %s\n",
8752 8754 vhci_print_scsi_cmd(*cdb));
8753 8755 return;
8754 8756 }
8755 8757
8756 8758 (void) sprintf(buf, "%s for cmd(%s)", title, vhci_print_scsi_cmd(*cdb));
8757 8759 vhci_clean_print(dip, level, buf, cdb, len);
8758 8760 }
8759 8761
8760 8762 static void
8761 8763 vhci_clean_print(dev_info_t *dev, uint_t level, char *title, uchar_t *data,
8762 8764 int len)
8763 8765 {
8764 8766 int i;
8765 8767 int c;
8766 8768 char *format;
8767 8769 char buf[256];
8768 8770 uchar_t byte;
8769 8771
8770 8772 (void) sprintf(buf, "%s:\n", title);
8771 8773 vhci_log(level, dev, "%s", buf);
8772 8774 level = CE_CONT;
8773 8775 for (i = 0; i < len; ) {
8774 8776 buf[0] = 0;
8775 8777 for (c = 0; c < 8 && i < len; c++, i++) {
8776 8778 byte = (uchar_t)data[i];
8777 8779 if (byte < 0x10)
8778 8780 format = "0x0%x ";
8779 8781 else
8780 8782 format = "0x%x ";
8781 8783 (void) sprintf(&buf[(int)strlen(buf)], format, byte);
8782 8784 }
8783 8785 (void) sprintf(&buf[(int)strlen(buf)], "\n");
8784 8786
8785 8787 vhci_log(level, dev, "%s\n", buf);
8786 8788 }
8787 8789 }
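
A hypothetical call into the DEBUG dump helpers above, with the approximate
output they would produce for a Group 0 CDB:

	uchar_t cdb[CDB_GROUP0] = { SCMD_TEST_UNIT_READY, 0, 0, 0, 0, 0 };

	vhci_print_cdb(vdip, CE_WARN, "failed pkt", cdb);
	/*
	 * Roughly, via vhci_log() (eight bytes per output row):
	 *   failed pkt for cmd(test_unit_ready):
	 *   0x00 0x00 0x00 0x00 0x00 0x00
	 */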
8788 8790 #endif
8789 8791 static void
8790 8792 vhci_invalidate_mpapi_lu(struct scsi_vhci *vhci, scsi_vhci_lun_t *vlun)
8791 8793 {
8792 8794 char *svl_wwn;
8793 8795 mpapi_item_list_t *ilist;
8794 8796 mpapi_lu_data_t *ld;
8795 8797
8796 8798 if (vlun == NULL) {
8797 8799 return;
8798 8800 } else {
8799 8801 svl_wwn = vlun->svl_lun_wwn;
8800 8802 }
8801 8803
8802 8804 ilist = vhci->mp_priv->obj_hdr_list[MP_OBJECT_TYPE_MULTIPATH_LU]->head;
8803 8805
8804 8806 while (ilist != NULL) {
8805 8807 ld = (mpapi_lu_data_t *)(ilist->item->idata);
8806 8808 if ((ld != NULL) && (strncmp(ld->prop.name, svl_wwn,
8807 8809 strlen(svl_wwn)) == 0)) {
8808 8810 ld->valid = 0;
8809 8811 VHCI_DEBUG(6, (CE_WARN, NULL,
8810 8812 "vhci_invalidate_mpapi_lu: "
8811 8813 "Invalidated LU(%s)", svl_wwn));
8812 8814 return;
8813 8815 }
8814 8816 ilist = ilist->next;
8815 8817 }
8816 8818 VHCI_DEBUG(6, (CE_WARN, NULL, "vhci_invalidate_mpapi_lu: "
8817 8819 "Could not find LU(%s) to invalidate.", svl_wwn));
8818 8820 }