Print this page
XXXX introduce drv_sectohz
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/ulp/fcp.c
+++ new/usr/src/uts/common/io/fibre-channel/ulp/fcp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 *
24 24 * Fibre Channel SCSI ULP Mapping driver
25 25 */
26 26
27 27 #include <sys/scsi/scsi.h>
28 28 #include <sys/types.h>
29 29 #include <sys/varargs.h>
30 30 #include <sys/devctl.h>
31 31 #include <sys/thread.h>
32 32 #include <sys/thread.h>
33 33 #include <sys/open.h>
34 34 #include <sys/file.h>
35 35 #include <sys/sunndi.h>
36 36 #include <sys/console.h>
37 37 #include <sys/proc.h>
38 38 #include <sys/time.h>
39 39 #include <sys/utsname.h>
40 40 #include <sys/scsi/impl/scsi_reset_notify.h>
41 41 #include <sys/ndi_impldefs.h>
42 42 #include <sys/byteorder.h>
43 43 #include <sys/fs/dv_node.h>
44 44 #include <sys/ctype.h>
45 45 #include <sys/sunmdi.h>
46 46
47 47 #include <sys/fibre-channel/fc.h>
48 48 #include <sys/fibre-channel/impl/fc_ulpif.h>
49 49 #include <sys/fibre-channel/ulp/fcpvar.h>
50 50
51 51 /*
52 52 * Discovery Process
53 53 * =================
54 54 *
55 55 * The discovery process is a major function of FCP. In order to help
56 56 * understand that function a flow diagram is given here. This diagram
57 57 * doesn't claim to cover all the cases and the events that can occur during
58 58 * the discovery process nor the subtleties of the code. The code paths shown
59 59 * are simplified. Its purpose is to help the reader (and potentially bug
60 60 * fixer) have an overall view of the logic of the code. For that reason the
61 61 * diagram covers the simple case of the line coming up cleanly or of a new
62 62 * port attaching to FCP with the link being up. The reader must keep in mind
63 63 * that:
64 64 *
65 65 * - There are special cases where bringing devices online and offline
66 66 * is driven by Ioctl.
67 67 *
68 68 * - The behavior of the discovery process can be modified through the
69 69 * .conf file.
70 70 *
71 71 * - The line can go down and come back up at any time during the
72 72 * discovery process which explains some of the complexity of the code.
73 73 *
74 74 * ............................................................................
75 75 *
76 76 * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
77 77 *
78 78 *
79 79 * +-------------------------+
80 80 * fp/fctl module --->| fcp_port_attach |
81 81 * +-------------------------+
82 82 * | |
83 83 * | |
84 84 * | v
85 85 * | +-------------------------+
86 86 * | | fcp_handle_port_attach |
87 87 * | +-------------------------+
88 88 * | |
89 89 * | |
90 90 * +--------------------+ |
91 91 * | |
92 92 * v v
93 93 * +-------------------------+
94 94 * | fcp_statec_callback |
95 95 * +-------------------------+
96 96 * |
97 97 * |
98 98 * v
99 99 * +-------------------------+
100 100 * | fcp_handle_devices |
101 101 * +-------------------------+
102 102 * |
103 103 * |
104 104 * v
105 105 * +-------------------------+
106 106 * | fcp_handle_mapflags |
107 107 * +-------------------------+
108 108 * |
109 109 * |
110 110 * v
111 111 * +-------------------------+
112 112 * | fcp_send_els |
113 113 * | |
114 114 * | PLOGI or PRLI To all the|
115 115 * | reachable devices. |
116 116 * +-------------------------+
117 117 *
118 118 *
119 119 * ............................................................................
120 120 *
121 121 * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
122 122 * STEP 1 are called (it is actually the same function).
123 123 *
124 124 *
125 125 * +-------------------------+
126 126 * | fcp_icmd_callback |
127 127 * fp/fctl module --->| |
128 128 * | callback for PLOGI and |
129 129 * | PRLI. |
130 130 * +-------------------------+
131 131 * |
132 132 * |
133 133 * Received PLOGI Accept /-\ Received PRLI Accept
134 134 * _ _ _ _ _ _ / \_ _ _ _ _ _
135 135 * | \ / |
136 136 * | \-/ |
137 137 * | |
138 138 * v v
139 139 * +-------------------------+ +-------------------------+
140 140 * | fcp_send_els | | fcp_send_scsi |
141 141 * | | | |
142 142 * | PRLI | | REPORT_LUN |
143 143 * +-------------------------+ +-------------------------+
144 144 *
145 145 * ............................................................................
146 146 *
147 147 * STEP 3: The callback functions of the SCSI commands issued by FCP are called
148 148 * (It is actually the same function).
149 149 *
150 150 *
151 151 * +-------------------------+
152 152 * fp/fctl module ------->| fcp_scsi_callback |
153 153 * +-------------------------+
154 154 * |
155 155 * |
156 156 * |
157 157 * Receive REPORT_LUN reply /-\ Receive INQUIRY PAGE83 reply
158 158 * _ _ _ _ _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ _ _ _ _
159 159 * | \ / |
160 160 * | \-/ |
161 161 * | | |
162 162 * | Receive INQUIRY reply| |
163 163 * | | |
164 164 * v v v
165 165 * +------------------------+ +----------------------+ +----------------------+
166 166 * | fcp_handle_reportlun | | fcp_handle_inquiry | | fcp_handle_page83 |
167 167 * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
168 168 * +------------------------+ +----------------------+ +----------------------+
169 169 * | | |
170 170 * | | |
171 171 * | | |
172 172 * v v |
173 173 * +-----------------+ +-----------------+ |
174 174 * | fcp_send_scsi | | fcp_send_scsi | |
175 175 * | | | | |
176 176 * | INQUIRY | | INQUIRY PAGE83 | |
177 177 * | (To each LUN) | +-----------------+ |
178 178 * +-----------------+ |
179 179 * |
180 180 * v
181 181 * +------------------------+
182 182 * | fcp_call_finish_init |
183 183 * +------------------------+
184 184 * |
185 185 * v
186 186 * +-----------------------------+
187 187 * | fcp_call_finish_init_held |
188 188 * +-----------------------------+
189 189 * |
190 190 * |
191 191 * All LUNs scanned /-\
192 192 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ / \
193 193 * | \ /
194 194 * | \-/
195 195 * v |
196 196 * +------------------+ |
197 197 * | fcp_finish_tgt | |
198 198 * +------------------+ |
199 199 * | Target Not Offline and |
200 200 * Target Not Offline and | not marked and tgt_node_state |
201 201 * marked /-\ not FCP_TGT_NODE_ON_DEMAND |
202 202 * _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ |
203 203 * | \ / | |
204 204 * | \-/ | |
205 205 * v v |
206 206 * +----------------------------+ +-------------------+ |
207 207 * | fcp_offline_target | | fcp_create_luns | |
208 208 * | | +-------------------+ |
209 209 * | A structure fcp_tgt_elem | | |
210 210 * | is created and queued in | v |
211 211 * | the FCP port list | +-------------------+ |
212 212 * | port_offline_tgts. It | | fcp_pass_to_hp | |
213 213 * | will be unqueued by the | | | |
214 214 * | watchdog timer. | | Called for each | |
215 215 * +----------------------------+ | LUN. Dispatches | |
216 216 * | | fcp_hp_task | |
217 217 * | +-------------------+ |
218 218 * | | |
219 219 * | | |
220 220 * | | |
221 221 * | +---------------->|
222 222 * | |
223 223 * +---------------------------------------------->|
224 224 * |
225 225 * |
226 226 * All the targets (devices) have been scanned /-\
227 227 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ / \
228 228 * | \ /
229 229 * | \-/
230 230 * +-------------------------------------+ |
231 231 * | fcp_finish_init | |
232 232 * | | |
233 233 * | Signal broadcasts the condition | |
234 234 * | variable port_config_cv of the FCP | |
235 235 * | port. One potential code sequence | |
236 236 * | waiting on the condition variable | |
237 237 * | the code sequence handling | |
238 238 * | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER| |
239 239 * | The other is in the function | |
240 240 * | fcp_reconfig_wait which is called | |
241 241 * | in the transmit path preventing IOs | |
242 242 * | from going through till the disco- | |
243 243 * | very process is over. | |
244 244 * +-------------------------------------+ |
245 245 * | |
246 246 * | |
247 247 * +--------------------------------->|
248 248 * |
249 249 * v
250 250 * Return
251 251 *
252 252 * ............................................................................
253 253 *
254 254 * STEP 4: The hot plug task is called (for each fcp_hp_elem).
255 255 *
256 256 *
257 257 * +-------------------------+
258 258 * | fcp_hp_task |
259 259 * +-------------------------+
260 260 * |
261 261 * |
262 262 * v
263 263 * +-------------------------+
264 264 * | fcp_trigger_lun |
265 265 * +-------------------------+
266 266 * |
267 267 * |
268 268 * v
269 269 * Bring offline /-\ Bring online
270 270 * _ _ _ _ _ _ _ _ _/ \_ _ _ _ _ _ _ _ _ _
271 271 * | \ / |
272 272 * | \-/ |
273 273 * v v
274 274 * +---------------------+ +-----------------------+
275 275 * | fcp_offline_child | | fcp_get_cip |
276 276 * +---------------------+ | |
277 277 * | Creates a dev_info_t |
278 278 * | or a mdi_pathinfo_t |
279 279 * | depending on whether |
280 280 * | mpxio is on or off. |
281 281 * +-----------------------+
282 282 * |
283 283 * |
284 284 * v
285 285 * +-----------------------+
286 286 * | fcp_online_child |
287 287 * | |
288 288 * | Set device online |
289 289 * | using NDI or MDI. |
290 290 * +-----------------------+
291 291 *
292 292 * ............................................................................
293 293 *
294 294 * STEP 5: The watchdog timer expires. The watchdog timer does much more than
295 295 * what is described here. We only show the target offline path.
296 296 *
297 297 *
298 298 * +--------------------------+
299 299 * | fcp_watch |
300 300 * +--------------------------+
301 301 * |
302 302 * |
303 303 * v
304 304 * +--------------------------+
305 305 * | fcp_scan_offline_tgts |
306 306 * +--------------------------+
307 307 * |
308 308 * |
309 309 * v
310 310 * +--------------------------+
311 311 * | fcp_offline_target_now |
312 312 * +--------------------------+
313 313 * |
314 314 * |
315 315 * v
316 316 * +--------------------------+
317 317 * | fcp_offline_tgt_luns |
318 318 * +--------------------------+
319 319 * |
320 320 * |
321 321 * v
322 322 * +--------------------------+
323 323 * | fcp_offline_lun |
324 324 * +--------------------------+
325 325 * |
326 326 * |
327 327 * v
328 328 * +----------------------------------+
329 329 * | fcp_offline_lun_now |
330 330 * | |
331 331 * | A request (or two if mpxio) is |
332 332 * | sent to the hot plug task using |
333 333 * | a fcp_hp_elem structure. |
334 334 * +----------------------------------+
335 335 */
336 336
337 337 /*
338 338 * Functions registered with DDI framework
339 339 */
340 340 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
341 341 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
342 342 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
343 343 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
344 344 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
345 345 cred_t *credp, int *rval);
346 346
347 347 /*
348 348 * Functions registered with FC Transport framework
349 349 */
350 350 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
351 351 fc_attach_cmd_t cmd, uint32_t s_id);
352 352 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
353 353 fc_detach_cmd_t cmd);
354 354 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
355 355 int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
356 356 uint32_t claimed);
357 357 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
358 358 fc_unsol_buf_t *buf, uint32_t claimed);
359 359 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
360 360 fc_unsol_buf_t *buf, uint32_t claimed);
361 361 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
362 362 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
363 363 uint32_t dev_cnt, uint32_t port_sid);
364 364
365 365 /*
366 366 * Functions registered with SCSA framework
367 367 */
368 368 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
369 369 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
370 370 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
371 371 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
372 372 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
373 373 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
374 374 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
375 375 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
376 376 static int fcp_scsi_reset(struct scsi_address *ap, int level);
377 377 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
378 378 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
379 379 int whom);
380 380 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
381 381 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
382 382 void (*callback)(caddr_t), caddr_t arg);
383 383 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
384 384 char *name, ddi_eventcookie_t *event_cookiep);
385 385 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
386 386 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
387 387 ddi_callback_id_t *cb_id);
388 388 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
389 389 ddi_callback_id_t cb_id);
390 390 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
391 391 ddi_eventcookie_t eventid, void *impldata);
392 392 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
393 393 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
394 394 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
395 395 ddi_bus_config_op_t op, void *arg);
396 396
397 397 /*
398 398 * Internal functions
399 399 */
400 400 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
401 401 int mode, int *rval);
402 402
403 403 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
404 404 int mode, int *rval);
405 405 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
406 406 struct fcp_scsi_cmd *fscsi, int mode);
407 407 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
408 408 caddr_t base_addr, int mode);
409 409 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
410 410
411 411 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
412 412 la_wwn_t *pwwn, int *ret_val, int *fc_status, int *fc_pkt_state,
413 413 int *fc_pkt_reason, int *fc_pkt_action);
414 414 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
415 415 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
416 416 static int fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status,
417 417 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
418 418 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
419 419 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
420 420 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
421 421 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
422 422
423 423 static void fcp_handle_devices(struct fcp_port *pptr,
424 424 fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
425 425 fcp_map_tag_t *map_tag, int cause);
426 426 static int fcp_handle_mapflags(struct fcp_port *pptr,
427 427 struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
428 428 int tgt_cnt, int cause);
429 429 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
430 430 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
431 431 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
432 432 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
433 433 int cause);
434 434 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
435 435 uint32_t state);
436 436 static struct fcp_port *fcp_get_port(opaque_t port_handle);
437 437 static void fcp_unsol_callback(fc_packet_t *fpkt);
438 438 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
439 439 uchar_t r_ctl, uchar_t type);
440 440 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
441 441 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
442 442 struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
443 443 int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
444 444 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
445 445 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
446 446 int nodma, int flags);
447 447 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
448 448 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
449 449 uchar_t *wwn);
450 450 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
451 451 uint32_t d_id);
452 452 static void fcp_icmd_callback(fc_packet_t *fpkt);
453 453 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
454 454 int len, int lcount, int tcount, int cause, uint32_t rscn_count);
455 455 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
456 456 static void fcp_scsi_callback(fc_packet_t *fpkt);
457 457 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
458 458 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
459 459 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 460 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
461 461 uint16_t lun_num);
462 462 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
463 463 int link_cnt, int tgt_cnt, int cause);
464 464 static void fcp_finish_init(struct fcp_port *pptr);
465 465 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
466 466 int tgt_cnt, int cause);
467 467 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
468 468 int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
469 469 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
470 470 int link_cnt, int tgt_cnt, int nowait, int flags);
471 471 static void fcp_offline_target_now(struct fcp_port *pptr,
472 472 struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
473 473 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
474 474 int tgt_cnt, int flags);
475 475 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
476 476 int nowait, int flags);
477 477 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
478 478 int tgt_cnt);
479 479 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
480 480 int tgt_cnt, int flags);
481 481 static void fcp_scan_offline_luns(struct fcp_port *pptr);
482 482 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
483 483 static void fcp_update_offline_flags(struct fcp_lun *plun);
484 484 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
485 485 static void fcp_abort_commands(struct fcp_pkt *head, struct
486 486 fcp_port *pptr);
487 487 static void fcp_cmd_callback(fc_packet_t *fpkt);
488 488 static void fcp_complete_pkt(fc_packet_t *fpkt);
489 489 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
490 490 struct fcp_port *pptr);
491 491 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
492 492 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
493 493 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
494 494 static void fcp_dealloc_lun(struct fcp_lun *plun);
495 495 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
496 496 fc_portmap_t *map_entry, int link_cnt);
497 497 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
498 498 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
499 499 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
500 500 int internal);
501 501 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
502 502 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
503 503 uint32_t s_id, int instance);
504 504 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
505 505 int instance);
506 506 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
507 507 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
508 508 int);
509 509 static void fcp_kmem_cache_destructor(struct scsi_pkt *, scsi_hba_tran_t *);
510 510 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
511 511 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
512 512 int flags);
513 513 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
514 514 static int fcp_reset_target(struct scsi_address *ap, int level);
515 515 static int fcp_commoncap(struct scsi_address *ap, char *cap,
516 516 int val, int tgtonly, int doset);
517 517 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
518 518 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
519 519 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
520 520 int sleep);
521 521 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
522 522 uint32_t s_id, fc_attach_cmd_t cmd, int instance);
523 523 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
524 524 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
525 525 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
526 526 int lcount, int tcount);
527 527 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
528 528 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
529 529 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
530 530 int tgt_cnt);
531 531 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
532 532 dev_info_t *pdip, caddr_t name);
533 533 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
534 534 int lcount, int tcount, int flags, int *circ);
535 535 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
536 536 int lcount, int tcount, int flags, int *circ);
537 537 static void fcp_remove_child(struct fcp_lun *plun);
538 538 static void fcp_watch(void *arg);
539 539 static void fcp_check_reset_delay(struct fcp_port *pptr);
540 540 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
541 541 struct fcp_lun *rlun, int tgt_cnt);
542 542 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
543 543 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
544 544 uchar_t *wwn, uint16_t lun);
545 545 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
546 546 struct fcp_lun *plun);
547 547 static void fcp_post_callback(struct fcp_pkt *cmd);
548 548 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
549 549 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
550 550 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
551 551 child_info_t *cip);
552 552 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
553 553 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
554 554 int tgt_cnt, int flags);
555 555 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
556 556 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
557 557 int tgt_cnt, int flags, int wait);
558 558 static void fcp_retransport_cmd(struct fcp_port *pptr,
559 559 struct fcp_pkt *cmd);
560 560 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
561 561 uint_t statistics);
562 562 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
563 563 static void fcp_update_targets(struct fcp_port *pptr,
564 564 fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
565 565 static int fcp_call_finish_init(struct fcp_port *pptr,
566 566 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
567 567 static int fcp_call_finish_init_held(struct fcp_port *pptr,
568 568 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
569 569 static void fcp_reconfigure_luns(void * tgt_handle);
570 570 static void fcp_free_targets(struct fcp_port *pptr);
571 571 static void fcp_free_target(struct fcp_tgt *ptgt);
572 572 static int fcp_is_retryable(struct fcp_ipkt *icmd);
573 573 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
574 574 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
575 575 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
576 576 static void fcp_print_error(fc_packet_t *fpkt);
577 577 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
578 578 struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
579 579 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
580 580 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
581 581 uint32_t *dev_cnt);
582 582 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
583 583 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
584 584 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
585 585 struct fcp_ioctl *, struct fcp_port **);
586 586 static char *fcp_get_lun_path(struct fcp_lun *plun);
587 587 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
588 588 int *rval);
589 589 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
590 590 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
591 591 static char *fcp_get_lun_path(struct fcp_lun *plun);
592 592 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
593 593 int *rval);
594 594 static void fcp_reconfig_wait(struct fcp_port *pptr);
595 595
596 596 /*
597 597 * New functions added for mpxio support
598 598 */
599 599 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
600 600 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
601 601 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
602 602 int tcount);
603 603 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
604 604 dev_info_t *pdip);
605 605 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
606 606 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
607 607 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
608 608 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
609 609 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
610 610 int what);
611 611 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
612 612 fc_packet_t *fpkt);
613 613 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
614 614
615 615 /*
616 616 * New functions added for lun masking support
617 617 */
618 618 static void fcp_read_blacklist(dev_info_t *dip,
619 619 struct fcp_black_list_entry **pplun_blacklist);
620 620 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
621 621 struct fcp_black_list_entry **pplun_blacklist);
622 622 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
623 623 struct fcp_black_list_entry **pplun_blacklist);
624 624 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
625 625 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
626 626
627 627 /*
628 628 * New functions to support software FCA (like fcoei)
629 629 */
630 630 static struct scsi_pkt *fcp_pseudo_init_pkt(
631 631 struct scsi_address *ap, struct scsi_pkt *pkt,
632 632 struct buf *bp, int cmdlen, int statuslen,
633 633 int tgtlen, int flags, int (*callback)(), caddr_t arg);
634 634 static void fcp_pseudo_destroy_pkt(
635 635 struct scsi_address *ap, struct scsi_pkt *pkt);
636 636 static void fcp_pseudo_sync_pkt(
637 637 struct scsi_address *ap, struct scsi_pkt *pkt);
638 638 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
639 639 static void fcp_pseudo_dmafree(
640 640 struct scsi_address *ap, struct scsi_pkt *pkt);
641 641
642 642 extern struct mod_ops mod_driverops;
643 643 /*
644 644 * This variable is defined in modctl.c and set to '1' after the root driver
645 645 * and fs are loaded. It serves as an indication that the root filesystem can
646 646 * be used.
647 647 */
648 648 extern int modrootloaded;
649 649 /*
650 650 * This table contains strings associated with the SCSI sense key codes. It
651 651 * is used by FCP to print a clear explanation of the code returned in the
652 652 * sense information by a device.
653 653 */
654 654 extern char *sense_keys[];
655 655 /*
656 656 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
657 657 * under this device that the paths to a physical device are created when
658 658 * MPxIO is used.
659 659 */
660 660 extern dev_info_t *scsi_vhci_dip;
661 661
662 662 /*
663 663 * Report lun processing
664 664 */
665 665 #define FCP_LUN_ADDRESSING 0x80
666 666 #define FCP_PD_ADDRESSING 0x00
#define	FCP_VOLUME_ADDRESSING	0x40

#define	FCP_SVE_THROTTLE	0x28 /* Vicom */
#define	MAX_INT_DMA		0x7fffffff
/*
 * Property definitions
 */
#define	NODE_WWN_PROP	(char *)fcp_node_wwn_prop
#define	PORT_WWN_PROP	(char *)fcp_port_wwn_prop
#define	TARGET_PROP	(char *)fcp_target_prop
#define	LUN_PROP	(char *)fcp_lun_prop
#define	SAM_LUN_PROP	(char *)fcp_sam_lun_prop
#define	CONF_WWN_PROP	(char *)fcp_conf_wwn_prop
#define	OBP_BOOT_WWN	(char *)fcp_obp_boot_wwn
#define	MANUAL_CFG_ONLY	(char *)fcp_manual_config_only
#define	INIT_PORT_PROP	(char *)fcp_init_port_prop
#define	TGT_PORT_PROP	(char *)fcp_tgt_port_prop
#define	LUN_BLACKLIST_PROP	(char *)fcp_lun_blacklist_prop
/*
 * Short hand macros.
 * NOTE: both expect a local variable named "plun" to be in scope.
 */
#define	LUN_PORT	(plun->lun_tgt->tgt_port)
#define	LUN_TGT		(plun->lun_tgt)

/*
 * Driver private macros
 */
/* ASCII hex digit to nibble; evaluates its argument more than once. */
#define	FCP_ATOB(x)	(((x) >= '0' && (x) <= '9') ? ((x) - '0') : \
			((x) >= 'a' && (x) <= 'f') ? \
			((x) - 'a' + 10) : ((x) - 'A' + 10))

/* Classic MAX; evaluates both arguments twice -- no side effects, please. */
#define	FCP_MAX(a, b)	((a) > (b) ? (a) : (b))

#define	FCP_N_NDI_EVENTS	\
	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))

#define	FCP_LINK_STATE_CHANGED(p, c)	\
	((p)->port_link_cnt != (c)->ipkt_link_cnt)

#define	FCP_TGT_STATE_CHANGED(t, c)	\
	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)

/* NOTE: the port argument (p) is currently unused. */
#define	FCP_STATE_CHANGED(p, t, c)	\
	(FCP_TGT_STATE_CHANGED(t, c))

/* Transient transport states/reasons for which a packet should be retried. */
#define	FCP_MUST_RETRY(fpkt)	\
	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY || \
	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT || \
	(fpkt)->pkt_state == FC_PKT_TRAN_BSY || \
	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS || \
	(fpkt)->pkt_state == FC_PKT_NPORT_BSY || \
	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY || \
	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE || \
	(fpkt)->pkt_reason == FC_REASON_OFFLINE)

/* UNIT ATTENTION, ASC/ASCQ 3F/0E: REPORTED LUNS DATA HAS CHANGED. */
#define	FCP_SENSE_REPORTLUN_CHANGED(es)	\
	((es)->es_key == KEY_UNIT_ATTENTION && \
	(es)->es_add_code == 0x3f && \
	(es)->es_qual_code == 0x0e)

/* ILLEGAL REQUEST, ASC/ASCQ 25/00: LOGICAL UNIT NOT SUPPORTED. */
#define	FCP_SENSE_NO_LUN(es)	\
	((es)->es_key == KEY_ILLEGAL_REQUEST && \
	(es)->es_add_code == 0x25 && \
	(es)->es_qual_code == 0x0)

#define	FCP_VERSION		"20091208-1.192"
#define	FCP_NAME_VERSION	"SunFC FCP v" FCP_VERSION

/* Element count of an actual array (not valid on decayed pointers). */
#define	FCP_NUM_ELEMENTS(array)	\
	(sizeof (array) / sizeof ((array)[0]))
737 737
/*
 * Debugging, Error reporting, and tracing
 */
/*
 * Size in bytes of the driver trace buffer.  The expansion is
 * parenthesized so the macro behaves correctly inside larger
 * expressions (e.g. "x / FCP_LOG_SIZE" or "x % FCP_LOG_SIZE");
 * the previous unparenthesized "1024 * 1024" did not.
 */
#define	FCP_LOG_SIZE		(1024 * 1024)
742 742
#define	FCP_LEVEL_1		0x00001		/* attach/detach PM CPR */
#define	FCP_LEVEL_2		0x00002		/* failures/Invalid data */
#define	FCP_LEVEL_3		0x00004		/* state change, discovery */
#define	FCP_LEVEL_4		0x00008		/* ULP messages */
#define	FCP_LEVEL_5		0x00010		/* ELS/SCSI cmds */
#define	FCP_LEVEL_6		0x00020		/* Transport failures */
#define	FCP_LEVEL_7		0x00040		/* no description in original */
#define	FCP_LEVEL_8		0x00080		/* I/O tracing */
#define	FCP_LEVEL_9		0x00100		/* I/O tracing */



/*
 * Log contents to system messages file
 */
#define	FCP_MSG_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_MSG)


/*
 * Log contents to trace buffer
 */
#define	FCP_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF)


/*
 * Log contents to both system messages file and trace buffer
 */
#define	FCP_MSG_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
/*
 * NOTE(review): in non-DEBUG builds FCP_DTRACE expands to nothing, so
 * "FCP_DTRACE(a, b, ...);" leaves "(a, b, ...);" behind -- a comma
 * expression.  The arguments are therefore still evaluated; only the
 * call to fc_trace_debug is elided.
 */
#ifdef DEBUG
#define	FCP_DTRACE	fc_trace_debug
#else
#define	FCP_DTRACE
#endif

#define	FCP_TRACE	fc_trace_debug
811 811
/*
 * Character device entry points for the fcp pseudo device.  Only
 * open/close/ioctl are implemented; every other entry is nodev.
 */
static struct cb_ops fcp_cb_ops = {
	fcp_open,			/* open */
	fcp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fcp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};
832 832
833 833
/*
 * Device operations vector for the fcp pseudo device.  attach/detach
 * only manage the global minor node; per-port work arrives through
 * the ULP callbacks registered in fcp_modinfo.
 */
static struct dev_ops fcp_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	ddi_getinfo_1to1,	/* devo_getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	fcp_attach,		/* attach and detach are mandatory */
	fcp_detach,
	nodev,			/* reset */
	&fcp_cb_ops,		/* cb_ops */
	NULL,			/* bus_ops */
	NULL,			/* power */
};
847 847
848 848
/* Exported driver identification string. */
char *fcp_version = FCP_NAME_VERSION;

/* Module linkage handed to mod_install(9f)/mod_remove(9f). */
static struct modldrv modldrv = {
	&mod_driverops,
	FCP_NAME_VERSION,
	&fcp_ops
};


static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};


/*
 * ULP registration information passed to fc_ulp_add() in _init().
 * The FC transport delivers port attach/detach, ioctl, ELS, data and
 * state-change events through these callbacks.
 */
static fc_ulp_modinfo_t fcp_modinfo = {
	&fcp_modinfo,		/* ulp_handle */
	FCTL_ULP_MODREV_4,	/* ulp_rev */
	FC4_SCSI_FCP,		/* ulp_type */
	"fcp",			/* ulp_name */
	FCP_STATEC_MASK,	/* ulp_statec_mask */
	fcp_port_attach,	/* ulp_port_attach */
	fcp_port_detach,	/* ulp_port_detach */
	fcp_port_ioctl,		/* ulp_port_ioctl */
	fcp_els_callback,	/* ulp_els_callback */
	fcp_data_callback,	/* ulp_data_callback */
	fcp_statec_callback	/* ulp_statec_callback */
};
878 878
/*
 * NOTE(review): the DEBUG and non-DEBUG masks below are currently
 * identical; the #ifdef is retained only as a hook for a heavier
 * DEBUG default.
 */
#ifdef DEBUG
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
				FCP_LEVEL_2 | FCP_LEVEL_3 | \
				FCP_LEVEL_4 | FCP_LEVEL_5 | \
				FCP_LEVEL_6 | FCP_LEVEL_7)
#else
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
				FCP_LEVEL_2 | FCP_LEVEL_3 | \
				FCP_LEVEL_4 | FCP_LEVEL_5 | \
				FCP_LEVEL_6 | FCP_LEVEL_7)
#endif
890 890
/* FCP global variables */
int fcp_bus_config_debug = 0;
static int fcp_log_size = FCP_LOG_SIZE;
static int fcp_trace = FCP_TRACE_DEFAULT;
static fc_trace_logq_t *fcp_logq = NULL;	/* allocated in _init() */
static struct fcp_black_list_entry *fcp_lun_blacklist = NULL;
/*
 * The auto-configuration is set by default. The only way of disabling it is
 * through the property MANUAL_CFG_ONLY in the fcp.conf file.
 */
static int fcp_enable_auto_configuration = 1;
static int fcp_max_bus_config_retries = 4;
static int fcp_lun_ready_retry = 300;
/*
 * The value assigned to the following variable has changed several times due
 * to a problem with the data underruns reporting of some firmware(s). The
 * current value of 50 gives a timeout value of 25 seconds for a max number
 * of 256 LUNs.
 */
static int fcp_max_target_retries = 50;
/*
 * Watchdog variables
 * ------------------
 *
 * fcp_watchdog_init
 *
 *	Indicates if the watchdog timer is running or not. This is actually
 *	a counter of the number of Fibre Channel ports that attached. When
 *	the first port attaches the watchdog is started. When the last port
 *	detaches the watchdog timer is stopped.
 *
 * fcp_watchdog_time
 *
 *	This is the watchdog clock counter. It is incremented by
 *	fcp_watchdog_timeout each time the watchdog timer expires.
 *
 * fcp_watchdog_timeout
 *
 *	Increment value of the variable fcp_watchdog_time as well as the
 *	the timeout value of the watchdog timer. The unit is 1 second. It
 *	is strange that this is not a #define but a variable since the code
 *	never changes this value. The reason why it can be said that the
 *	unit is 1 second is because the number of ticks for the watchdog
 *	timer is determined like this:
 *
 *		fcp_watchdog_tick = fcp_watchdog_timeout *
 *			drv_usectohz(1000000);
 *
 *	The value 1000000 is hard coded in the code.
 *
 * fcp_watchdog_tick
 *
 *	Watchdog timer value in ticks.
 */
static int fcp_watchdog_init = 0;
static int fcp_watchdog_time = 0;
static int fcp_watchdog_timeout = 1;
static int fcp_watchdog_tick;

/*
 * fcp_offline_delay is a global variable to enable customisation of
 * the timeout on link offlines or RSCNs. The default value is set
 * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
 * specified in FCP4 Chapter 11 (see www.t10.org).
 *
 * The variable fcp_offline_delay is specified in SECONDS.
 *
 * If we made this a static var then the user would not be able to
 * change it. This variable is set in fcp_attach().
 */
unsigned int fcp_offline_delay = FCP_OFFLINE_DELAY;

static void *fcp_softstate = NULL; /* for soft state */
static uchar_t fcp_oflag = FCP_IDLE; /* open flag */
static kmutex_t fcp_global_mutex;
static kmutex_t fcp_ioctl_mutex;	/* serializes FCP_TGT_SEND_SCSI */
static dev_info_t *fcp_global_dip = NULL;	/* the fcp pseudo dev node */
static timeout_id_t fcp_watchdog_id;	/* id of the pending watchdog timeout */
const char *fcp_lun_prop = "lun";
const char *fcp_sam_lun_prop = "sam-lun";
const char *fcp_target_prop = "target";
/*
 * NOTE: consumers of "node-wwn" property include stmsboot in ON
 * consolidation.
 */
const char *fcp_node_wwn_prop = "node-wwn";
const char *fcp_port_wwn_prop = "port-wwn";
const char *fcp_conf_wwn_prop = "fc-port-wwn";
const char *fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
const char *fcp_manual_config_only = "manual_configuration_only";
const char *fcp_init_port_prop = "initiator-port";
const char *fcp_tgt_port_prop = "target-port";
const char *fcp_lun_blacklist_prop = "pwwn-lun-blacklist";

/* Head of the list of attached ports; protected by fcp_global_mutex. */
static struct fcp_port *fcp_port_head = NULL;
static ddi_eventcookie_t fcp_insert_eid;
static ddi_eventcookie_t fcp_remove_eid;

static ndi_event_definition_t fcp_ndi_event_defs[] = {
	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
};
993 993
/*
 * List of valid commands for the scsi_ioctl call
 */
static uint8_t scsi_ioctl_list[] = {
	SCMD_INQUIRY,
	SCMD_REPORT_LUN,
	SCMD_READ_CAPACITY
};

/*
 * this is used to dummy up a report lun response for cases
 * where the target doesn't support it
 */
static uchar_t fcp_dummy_lun[] = {
	0x00,		/* MSB length (length = no of luns * 8) */
	0x00,
	0x00,
	0x08,		/* LSB length */
	0x00,		/* MSB reserved */
	0x00,
	0x00,
	0x00,		/* LSB reserved */
	FCP_PD_ADDRESSING,
	0x00,		/* LUN is ZERO at the first level */
	0x00,
	0x00,		/* second level is zero */
	0x00,
	0x00,		/* third level is zero */
	0x00,
	0x00		/* fourth level is zero */
};

/*
 * NOTE(review): per the name, this table maps an arbitrated-loop
 * physical address (AL_PA, used as the index) to a compact "switch"
 * index; 0x00 entries other than index 0 appear to mark AL_PA values
 * that are not valid on a loop -- confirm against the FC-AL spec.
 */
static uchar_t fcp_alpa_to_switch[] = {
	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * NOTE(review): presumably compared against the SCSI INQUIRY product
 * id of the VICOM SVE box ("SESS01", also listed in
 * fcp_symmetric_disk_table) -- confirm at the use site.
 */
static caddr_t pid = "SESS01 ";
1054 1054
#if !defined(lint)

/* Lock-analysis annotations consumed by lint's static checker only. */
_NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
    fcp_port::fcp_next fcp_watchdog_id))

_NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))

_NOTE(SCHEME_PROTECTS_DATA("Unshared",
    fcp_insert_eid
    fcp_remove_eid
    fcp_watchdog_time))

_NOTE(SCHEME_PROTECTS_DATA("Unshared",
    fcp_cb_ops
    fcp_ops
    callb_cpr))

#endif /* lint */
1073 1073
/*
 * This table is used to determine whether or not it's safe to copy in
 * the target node name for a lun. Since all luns behind the same target
 * have the same wwnn, only targets that do not support multiple luns are
 * eligible to be enumerated under mpxio if they aren't page83 compliant.
 */

char *fcp_symmetric_disk_table[] = {
	"SEAGATE ST",
	"IBM DDYFT",
	"SUNW SUNWGS",	/* Daktari enclosure */
	"SUN SENA",		/* SES device */
	"SUN SESS01"		/* VICOM SVE box */
};

/* Number of entries in fcp_symmetric_disk_table. */
int fcp_symmetric_disk_table_size =
	sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1091 1091
/*
 * This structure is bogus. scsi_hba_attach_setup() requires, as in the kernel
 * will panic if you don't pass this in to the routine, this information.
 * Need to determine what the actual impact to the system is by providing
 * this information if any. Since dma allocation is done in pkt_init it may
 * not have any impact. These values are straight from the Writing Device
 * Driver manual.
 */
static ddi_dma_attr_t pseudo_fca_dma_attr = {
	DMA_ATTR_V0,	/* ddi_dma_attr version */
	0,		/* low address */
	0xffffffff,	/* high address */
	0x00ffffff,	/* counter upper bound */
	1,		/* alignment requirements */
	0x3f,		/* burst sizes */
	1,		/* minimum DMA access */
	0xffffffff,	/* maximum DMA access */
	(1 << 24) - 1,	/* segment boundary restrictions */
	1,		/* scatter/gather list length */
	512,		/* device granularity */
	0		/* DMA flags */
};
1114 1114
1115 1115 /*
1116 1116 * The _init(9e) return value should be that of mod_install(9f). Under
1117 1117 * some circumstances, a failure may not be related mod_install(9f) and
1118 1118 * one would then require a return value to indicate the failure. Looking
1119 1119 * at mod_install(9f), it is expected to return 0 for success and non-zero
1120 1120 * for failure. mod_install(9f) for device drivers, further goes down the
1121 1121 * calling chain and ends up in ddi_installdrv(), whose return values are
1122 1122 * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1123 1123 * calling chain of mod_install(9f) which return values like EINVAL and
1124 1124 * in some even return -1.
1125 1125 *
1126 1126 * To work around the vagaries of the mod_install() calling chain, return
1127 1127 * either 0 or ENODEV depending on the success or failure of mod_install()
1128 1128 */
1129 1129 int
1130 1130 _init(void)
1131 1131 {
1132 1132 int rval;
1133 1133
1134 1134 /*
1135 1135 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
1136 1136 * before registering with the transport first.
1137 1137 */
1138 1138 if (ddi_soft_state_init(&fcp_softstate,
1139 1139 sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1140 1140 return (EINVAL);
1141 1141 }
1142 1142
1143 1143 mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1144 1144 mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1145 1145
1146 1146 if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1147 1147 cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1148 1148 mutex_destroy(&fcp_global_mutex);
1149 1149 mutex_destroy(&fcp_ioctl_mutex);
1150 1150 ddi_soft_state_fini(&fcp_softstate);
1151 1151 return (ENODEV);
1152 1152 }
1153 1153
1154 1154 fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1155 1155
1156 1156 if ((rval = mod_install(&modlinkage)) != 0) {
1157 1157 fc_trace_free_logq(fcp_logq);
1158 1158 (void) fc_ulp_remove(&fcp_modinfo);
1159 1159 mutex_destroy(&fcp_global_mutex);
1160 1160 mutex_destroy(&fcp_ioctl_mutex);
1161 1161 ddi_soft_state_fini(&fcp_softstate);
1162 1162 rval = ENODEV;
1163 1163 }
1164 1164
1165 1165 return (rval);
1166 1166 }
1167 1167
1168 1168
1169 1169 /*
1170 1170 * the system is done with us as a driver, so clean up
1171 1171 */
1172 1172 int
1173 1173 _fini(void)
1174 1174 {
1175 1175 int rval;
1176 1176
1177 1177 /*
1178 1178 * don't start cleaning up until we know that the module remove
1179 1179 * has worked -- if this works, then we know that each instance
1180 1180 * has successfully been DDI_DETACHed
1181 1181 */
1182 1182 if ((rval = mod_remove(&modlinkage)) != 0) {
1183 1183 return (rval);
1184 1184 }
1185 1185
1186 1186 (void) fc_ulp_remove(&fcp_modinfo);
1187 1187
1188 1188 ddi_soft_state_fini(&fcp_softstate);
1189 1189 mutex_destroy(&fcp_global_mutex);
1190 1190 mutex_destroy(&fcp_ioctl_mutex);
1191 1191 fc_trace_free_logq(fcp_logq);
1192 1192
1193 1193 return (rval);
1194 1194 }
1195 1195
1196 1196
1197 1197 int
1198 1198 _info(struct modinfo *modinfop)
1199 1199 {
1200 1200 return (mod_info(&modlinkage, modinfop));
1201 1201 }
1202 1202
1203 1203
1204 1204 /*
1205 1205 * attach the module
1206 1206 */
1207 1207 static int
1208 1208 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1209 1209 {
1210 1210 int rval = DDI_SUCCESS;
1211 1211
1212 1212 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1213 1213 FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1214 1214
1215 1215 if (cmd == DDI_ATTACH) {
1216 1216 /* The FCP pseudo device is created here. */
1217 1217 mutex_enter(&fcp_global_mutex);
1218 1218 fcp_global_dip = devi;
1219 1219 mutex_exit(&fcp_global_mutex);
1220 1220
1221 1221 if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1222 1222 0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1223 1223 ddi_report_dev(fcp_global_dip);
1224 1224 } else {
1225 1225 cmn_err(CE_WARN, "FCP: Cannot create minor node");
1226 1226 mutex_enter(&fcp_global_mutex);
1227 1227 fcp_global_dip = NULL;
1228 1228 mutex_exit(&fcp_global_mutex);
1229 1229
1230 1230 rval = DDI_FAILURE;
1231 1231 }
1232 1232 /*
1233 1233 * We check the fcp_offline_delay property at this
1234 1234 * point. This variable is global for the driver,
1235 1235 * not specific to an instance.
1236 1236 *
1237 1237 * We do not recommend setting the value to less
1238 1238 * than 10 seconds (RA_TOV_els), or greater than
1239 1239 * 60 seconds.
1240 1240 */
1241 1241 fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1242 1242 devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1243 1243 "fcp_offline_delay", FCP_OFFLINE_DELAY);
1244 1244 if ((fcp_offline_delay < 10) ||
1245 1245 (fcp_offline_delay > 60)) {
1246 1246 cmn_err(CE_WARN, "Setting fcp_offline_delay "
1247 1247 "to %d second(s). This is outside the "
1248 1248 "recommended range of 10..60 seconds.",
1249 1249 fcp_offline_delay);
1250 1250 }
1251 1251 }
1252 1252
1253 1253 return (rval);
1254 1254 }
1255 1255
1256 1256
1257 1257 /*ARGSUSED*/
1258 1258 static int
1259 1259 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1260 1260 {
1261 1261 int res = DDI_SUCCESS;
1262 1262
1263 1263 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1264 1264 FCP_BUF_LEVEL_8, 0, "module detach: cmd=0x%x", cmd);
1265 1265
1266 1266 if (cmd == DDI_DETACH) {
1267 1267 /*
1268 1268 * Check if there are active ports/threads. If there
1269 1269 * are any, we will fail, else we will succeed (there
1270 1270 * should not be much to clean up)
1271 1271 */
1272 1272 mutex_enter(&fcp_global_mutex);
1273 1273 FCP_DTRACE(fcp_logq, "fcp",
1274 1274 fcp_trace, FCP_BUF_LEVEL_8, 0, "port_head=%p",
1275 1275 (void *) fcp_port_head);
1276 1276
1277 1277 if (fcp_port_head == NULL) {
1278 1278 ddi_remove_minor_node(fcp_global_dip, NULL);
1279 1279 fcp_global_dip = NULL;
1280 1280 mutex_exit(&fcp_global_mutex);
1281 1281 } else {
1282 1282 mutex_exit(&fcp_global_mutex);
1283 1283 res = DDI_FAILURE;
1284 1284 }
1285 1285 }
1286 1286 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1287 1287 FCP_BUF_LEVEL_8, 0, "module detach returning %d", res);
1288 1288
1289 1289 return (res);
1290 1290 }
1291 1291
1292 1292
1293 1293 /* ARGSUSED */
1294 1294 static int
1295 1295 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1296 1296 {
1297 1297 if (otype != OTYP_CHR) {
1298 1298 return (EINVAL);
1299 1299 }
1300 1300
1301 1301 /*
1302 1302 * Allow only root to talk;
1303 1303 */
1304 1304 if (drv_priv(credp)) {
1305 1305 return (EPERM);
1306 1306 }
1307 1307
1308 1308 mutex_enter(&fcp_global_mutex);
1309 1309 if (fcp_oflag & FCP_EXCL) {
1310 1310 mutex_exit(&fcp_global_mutex);
1311 1311 return (EBUSY);
1312 1312 }
1313 1313
1314 1314 if (flag & FEXCL) {
1315 1315 if (fcp_oflag & FCP_OPEN) {
1316 1316 mutex_exit(&fcp_global_mutex);
1317 1317 return (EBUSY);
1318 1318 }
1319 1319 fcp_oflag |= FCP_EXCL;
1320 1320 }
1321 1321 fcp_oflag |= FCP_OPEN;
1322 1322 mutex_exit(&fcp_global_mutex);
1323 1323
1324 1324 return (0);
1325 1325 }
1326 1326
1327 1327
1328 1328 /* ARGSUSED */
1329 1329 static int
1330 1330 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1331 1331 {
1332 1332 if (otype != OTYP_CHR) {
1333 1333 return (EINVAL);
1334 1334 }
1335 1335
1336 1336 mutex_enter(&fcp_global_mutex);
1337 1337 if (!(fcp_oflag & FCP_OPEN)) {
1338 1338 mutex_exit(&fcp_global_mutex);
1339 1339 return (ENODEV);
1340 1340 }
1341 1341 fcp_oflag = FCP_IDLE;
1342 1342 mutex_exit(&fcp_global_mutex);
1343 1343
1344 1344 return (0);
1345 1345 }
1346 1346
1347 1347
1348 1348 /*
1349 1349 * fcp_ioctl
1350 1350 * Entry point for the FCP ioctls
1351 1351 *
1352 1352 * Input:
1353 1353 * See ioctl(9E)
1354 1354 *
1355 1355 * Output:
1356 1356 * See ioctl(9E)
1357 1357 *
1358 1358 * Returns:
1359 1359 * See ioctl(9E)
1360 1360 *
1361 1361 * Context:
1362 1362 * Kernel context.
1363 1363 */
1364 1364 /* ARGSUSED */
1365 1365 static int
1366 1366 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1367 1367 int *rval)
1368 1368 {
1369 1369 int ret = 0;
1370 1370
1371 1371 mutex_enter(&fcp_global_mutex);
1372 1372 if (!(fcp_oflag & FCP_OPEN)) {
1373 1373 mutex_exit(&fcp_global_mutex);
1374 1374 return (ENXIO);
1375 1375 }
1376 1376 mutex_exit(&fcp_global_mutex);
1377 1377
1378 1378 switch (cmd) {
1379 1379 case FCP_TGT_INQUIRY:
1380 1380 case FCP_TGT_CREATE:
1381 1381 case FCP_TGT_DELETE:
1382 1382 ret = fcp_setup_device_data_ioctl(cmd,
1383 1383 (struct fcp_ioctl *)data, mode, rval);
1384 1384 break;
1385 1385
1386 1386 case FCP_TGT_SEND_SCSI:
1387 1387 mutex_enter(&fcp_ioctl_mutex);
1388 1388 ret = fcp_setup_scsi_ioctl(
1389 1389 (struct fcp_scsi_cmd *)data, mode, rval);
1390 1390 mutex_exit(&fcp_ioctl_mutex);
1391 1391 break;
1392 1392
1393 1393 case FCP_STATE_COUNT:
1394 1394 ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1395 1395 mode, rval);
1396 1396 break;
1397 1397 case FCP_GET_TARGET_MAPPINGS:
1398 1398 ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1399 1399 mode, rval);
1400 1400 break;
1401 1401 default:
1402 1402 fcp_log(CE_WARN, NULL,
1403 1403 "!Invalid ioctl opcode = 0x%x", cmd);
1404 1404 ret = EINVAL;
1405 1405 }
1406 1406
1407 1407 return (ret);
1408 1408 }
1409 1409
1410 1410
1411 1411 /*
1412 1412 * fcp_setup_device_data_ioctl
1413 1413 * Setup handler for the "device data" style of
1414 1414 * ioctl for FCP. See "fcp_util.h" for data structure
1415 1415 * definition.
1416 1416 *
1417 1417 * Input:
1418 1418 * cmd = FCP ioctl command
1419 1419 * data = ioctl data
1420 1420 * mode = See ioctl(9E)
1421 1421 *
1422 1422 * Output:
1423 1423 * data = ioctl data
1424 1424 * rval = return value - see ioctl(9E)
1425 1425 *
1426 1426 * Returns:
1427 1427 * See ioctl(9E)
1428 1428 *
1429 1429 * Context:
1430 1430 * Kernel context.
1431 1431 */
1432 1432 /* ARGSUSED */
1433 1433 static int
1434 1434 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1435 1435 int *rval)
1436 1436 {
1437 1437 struct fcp_port *pptr;
1438 1438 struct device_data *dev_data;
1439 1439 uint32_t link_cnt;
1440 1440 la_wwn_t *wwn_ptr = NULL;
1441 1441 struct fcp_tgt *ptgt = NULL;
1442 1442 struct fcp_lun *plun = NULL;
1443 1443 int i, error;
1444 1444 struct fcp_ioctl fioctl;
1445 1445
1446 1446 #ifdef _MULTI_DATAMODEL
1447 1447 switch (ddi_model_convert_from(mode & FMODELS)) {
1448 1448 case DDI_MODEL_ILP32: {
1449 1449 struct fcp32_ioctl f32_ioctl;
1450 1450
1451 1451 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1452 1452 sizeof (struct fcp32_ioctl), mode)) {
1453 1453 return (EFAULT);
1454 1454 }
1455 1455 fioctl.fp_minor = f32_ioctl.fp_minor;
1456 1456 fioctl.listlen = f32_ioctl.listlen;
1457 1457 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1458 1458 break;
1459 1459 }
1460 1460 case DDI_MODEL_NONE:
1461 1461 if (ddi_copyin((void *)data, (void *)&fioctl,
1462 1462 sizeof (struct fcp_ioctl), mode)) {
1463 1463 return (EFAULT);
1464 1464 }
1465 1465 break;
1466 1466 }
1467 1467
1468 1468 #else /* _MULTI_DATAMODEL */
1469 1469 if (ddi_copyin((void *)data, (void *)&fioctl,
1470 1470 sizeof (struct fcp_ioctl), mode)) {
1471 1471 return (EFAULT);
1472 1472 }
1473 1473 #endif /* _MULTI_DATAMODEL */
1474 1474
1475 1475 /*
1476 1476 * Right now we can assume that the minor number matches with
1477 1477 * this instance of fp. If this changes we will need to
1478 1478 * revisit this logic.
1479 1479 */
1480 1480 mutex_enter(&fcp_global_mutex);
1481 1481 pptr = fcp_port_head;
1482 1482 while (pptr) {
1483 1483 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1484 1484 break;
1485 1485 } else {
1486 1486 pptr = pptr->port_next;
1487 1487 }
1488 1488 }
1489 1489 mutex_exit(&fcp_global_mutex);
1490 1490 if (pptr == NULL) {
1491 1491 return (ENXIO);
1492 1492 }
1493 1493 mutex_enter(&pptr->port_mutex);
1494 1494
1495 1495
1496 1496 if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1497 1497 fioctl.listlen, KM_NOSLEEP)) == NULL) {
1498 1498 mutex_exit(&pptr->port_mutex);
1499 1499 return (ENOMEM);
1500 1500 }
1501 1501
1502 1502 if (ddi_copyin(fioctl.list, dev_data,
1503 1503 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1504 1504 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1505 1505 mutex_exit(&pptr->port_mutex);
1506 1506 return (EFAULT);
1507 1507 }
1508 1508 link_cnt = pptr->port_link_cnt;
1509 1509
1510 1510 if (cmd == FCP_TGT_INQUIRY) {
1511 1511 wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1512 1512 if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1513 1513 sizeof (wwn_ptr->raw_wwn)) == 0) {
1514 1514 /* This ioctl is requesting INQ info of local HBA */
1515 1515 mutex_exit(&pptr->port_mutex);
1516 1516 dev_data[0].dev0_type = DTYPE_UNKNOWN;
1517 1517 dev_data[0].dev_status = 0;
1518 1518 if (ddi_copyout(dev_data, fioctl.list,
1519 1519 (sizeof (struct device_data)) * fioctl.listlen,
1520 1520 mode)) {
1521 1521 kmem_free(dev_data,
1522 1522 sizeof (*dev_data) * fioctl.listlen);
1523 1523 return (EFAULT);
1524 1524 }
1525 1525 kmem_free(dev_data,
1526 1526 sizeof (*dev_data) * fioctl.listlen);
1527 1527 #ifdef _MULTI_DATAMODEL
1528 1528 switch (ddi_model_convert_from(mode & FMODELS)) {
1529 1529 case DDI_MODEL_ILP32: {
1530 1530 struct fcp32_ioctl f32_ioctl;
1531 1531 f32_ioctl.fp_minor = fioctl.fp_minor;
1532 1532 f32_ioctl.listlen = fioctl.listlen;
1533 1533 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1534 1534 if (ddi_copyout((void *)&f32_ioctl,
1535 1535 (void *)data,
1536 1536 sizeof (struct fcp32_ioctl), mode)) {
1537 1537 return (EFAULT);
1538 1538 }
1539 1539 break;
1540 1540 }
1541 1541 case DDI_MODEL_NONE:
1542 1542 if (ddi_copyout((void *)&fioctl, (void *)data,
1543 1543 sizeof (struct fcp_ioctl), mode)) {
1544 1544 return (EFAULT);
1545 1545 }
1546 1546 break;
1547 1547 }
1548 1548 #else /* _MULTI_DATAMODEL */
1549 1549 if (ddi_copyout((void *)&fioctl, (void *)data,
1550 1550 sizeof (struct fcp_ioctl), mode)) {
1551 1551 return (EFAULT);
1552 1552 }
1553 1553 #endif /* _MULTI_DATAMODEL */
1554 1554 return (0);
1555 1555 }
1556 1556 }
1557 1557
1558 1558 if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1559 1559 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1560 1560 mutex_exit(&pptr->port_mutex);
1561 1561 return (ENXIO);
1562 1562 }
1563 1563
1564 1564 for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1565 1565 i++) {
1566 1566 wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1567 1567
1568 1568 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1569 1569
1570 1570
1571 1571 dev_data[i].dev_status = ENXIO;
1572 1572
1573 1573 if ((ptgt = fcp_lookup_target(pptr,
1574 1574 (uchar_t *)wwn_ptr)) == NULL) {
1575 1575 mutex_exit(&pptr->port_mutex);
1576 1576 if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1577 1577 wwn_ptr, &error, 0) == NULL) {
1578 1578 dev_data[i].dev_status = ENODEV;
1579 1579 mutex_enter(&pptr->port_mutex);
1580 1580 continue;
1581 1581 } else {
1582 1582
1583 1583 dev_data[i].dev_status = EAGAIN;
1584 1584
1585 1585 mutex_enter(&pptr->port_mutex);
1586 1586 continue;
1587 1587 }
1588 1588 } else {
1589 1589 mutex_enter(&ptgt->tgt_mutex);
1590 1590 if (ptgt->tgt_state & (FCP_TGT_MARK |
1591 1591 FCP_TGT_BUSY)) {
1592 1592 dev_data[i].dev_status = EAGAIN;
1593 1593 mutex_exit(&ptgt->tgt_mutex);
1594 1594 continue;
1595 1595 }
1596 1596
1597 1597 if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1598 1598 if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1599 1599 dev_data[i].dev_status = ENOTSUP;
1600 1600 } else {
1601 1601 dev_data[i].dev_status = ENXIO;
1602 1602 }
1603 1603 mutex_exit(&ptgt->tgt_mutex);
1604 1604 continue;
1605 1605 }
1606 1606
1607 1607 switch (cmd) {
1608 1608 case FCP_TGT_INQUIRY:
1609 1609 /*
1610 1610 * The reason we give device type of
1611 1611 * lun 0 only even though in some
1612 1612 * cases(like maxstrat) lun 0 device
1613 1613 * type may be 0x3f(invalid) is that
1614 1614 * for bridge boxes target will appear
1615 1615 * as luns and the first lun could be
1616 1616 * a device that utility may not care
1617 1617 * about (like a tape device).
1618 1618 */
1619 1619 dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1620 1620 dev_data[i].dev_status = 0;
1621 1621 mutex_exit(&ptgt->tgt_mutex);
1622 1622
1623 1623 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1624 1624 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1625 1625 } else {
1626 1626 dev_data[i].dev0_type = plun->lun_type;
1627 1627 }
1628 1628 mutex_enter(&ptgt->tgt_mutex);
1629 1629 break;
1630 1630
1631 1631 case FCP_TGT_CREATE:
1632 1632 mutex_exit(&ptgt->tgt_mutex);
1633 1633 mutex_exit(&pptr->port_mutex);
1634 1634
1635 1635 /*
1636 1636 * serialize state change call backs.
1637 1637 * only one call back will be handled
1638 1638 * at a time.
1639 1639 */
1640 1640 mutex_enter(&fcp_global_mutex);
1641 1641 if (fcp_oflag & FCP_BUSY) {
1642 1642 mutex_exit(&fcp_global_mutex);
1643 1643 if (dev_data) {
1644 1644 kmem_free(dev_data,
1645 1645 sizeof (*dev_data) *
1646 1646 fioctl.listlen);
1647 1647 }
1648 1648 return (EBUSY);
1649 1649 }
1650 1650 fcp_oflag |= FCP_BUSY;
1651 1651 mutex_exit(&fcp_global_mutex);
1652 1652
1653 1653 dev_data[i].dev_status =
1654 1654 fcp_create_on_demand(pptr,
1655 1655 wwn_ptr->raw_wwn);
1656 1656
1657 1657 if (dev_data[i].dev_status != 0) {
1658 1658 char buf[25];
1659 1659
1660 1660 for (i = 0; i < FC_WWN_SIZE; i++) {
1661 1661 (void) sprintf(&buf[i << 1],
1662 1662 "%02x",
1663 1663 wwn_ptr->raw_wwn[i]);
1664 1664 }
1665 1665
1666 1666 fcp_log(CE_WARN, pptr->port_dip,
1667 1667 "!Failed to create nodes for"
1668 1668 " pwwn=%s; error=%x", buf,
1669 1669 dev_data[i].dev_status);
1670 1670 }
1671 1671
1672 1672 /* allow state change call backs again */
1673 1673 mutex_enter(&fcp_global_mutex);
1674 1674 fcp_oflag &= ~FCP_BUSY;
1675 1675 mutex_exit(&fcp_global_mutex);
1676 1676
1677 1677 mutex_enter(&pptr->port_mutex);
1678 1678 mutex_enter(&ptgt->tgt_mutex);
1679 1679
1680 1680 break;
1681 1681
1682 1682 case FCP_TGT_DELETE:
1683 1683 break;
1684 1684
1685 1685 default:
1686 1686 fcp_log(CE_WARN, pptr->port_dip,
1687 1687 "!Invalid device data ioctl "
1688 1688 "opcode = 0x%x", cmd);
1689 1689 }
1690 1690 mutex_exit(&ptgt->tgt_mutex);
1691 1691 }
1692 1692 }
1693 1693 mutex_exit(&pptr->port_mutex);
1694 1694
1695 1695 if (ddi_copyout(dev_data, fioctl.list,
1696 1696 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1697 1697 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1698 1698 return (EFAULT);
1699 1699 }
1700 1700 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1701 1701
1702 1702 #ifdef _MULTI_DATAMODEL
1703 1703 switch (ddi_model_convert_from(mode & FMODELS)) {
1704 1704 case DDI_MODEL_ILP32: {
1705 1705 struct fcp32_ioctl f32_ioctl;
1706 1706
1707 1707 f32_ioctl.fp_minor = fioctl.fp_minor;
1708 1708 f32_ioctl.listlen = fioctl.listlen;
1709 1709 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1710 1710 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1711 1711 sizeof (struct fcp32_ioctl), mode)) {
1712 1712 return (EFAULT);
1713 1713 }
1714 1714 break;
1715 1715 }
1716 1716 case DDI_MODEL_NONE:
1717 1717 if (ddi_copyout((void *)&fioctl, (void *)data,
1718 1718 sizeof (struct fcp_ioctl), mode)) {
1719 1719 return (EFAULT);
1720 1720 }
1721 1721 break;
1722 1722 }
1723 1723 #else /* _MULTI_DATAMODEL */
1724 1724
1725 1725 if (ddi_copyout((void *)&fioctl, (void *)data,
1726 1726 sizeof (struct fcp_ioctl), mode)) {
1727 1727 return (EFAULT);
1728 1728 }
1729 1729 #endif /* _MULTI_DATAMODEL */
1730 1730
1731 1731 return (0);
1732 1732 }
1733 1733
1734 1734 /*
1735 1735 * Fetch the target mappings (path, etc.) for all LUNs
1736 1736 * on this port.
1737 1737 */
1738 1738 /* ARGSUSED */
1739 1739 static int
1740 1740 fcp_get_target_mappings(struct fcp_ioctl *data,
1741 1741 int mode, int *rval)
1742 1742 {
1743 1743 struct fcp_port *pptr;
1744 1744 fc_hba_target_mappings_t *mappings;
1745 1745 fc_hba_mapping_entry_t *map;
1746 1746 struct fcp_tgt *ptgt = NULL;
1747 1747 struct fcp_lun *plun = NULL;
1748 1748 int i, mapIndex, mappingSize;
1749 1749 int listlen;
1750 1750 struct fcp_ioctl fioctl;
1751 1751 char *path;
1752 1752 fcp_ent_addr_t sam_lun_addr;
1753 1753
1754 1754 #ifdef _MULTI_DATAMODEL
1755 1755 switch (ddi_model_convert_from(mode & FMODELS)) {
1756 1756 case DDI_MODEL_ILP32: {
1757 1757 struct fcp32_ioctl f32_ioctl;
1758 1758
1759 1759 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1760 1760 sizeof (struct fcp32_ioctl), mode)) {
1761 1761 return (EFAULT);
1762 1762 }
1763 1763 fioctl.fp_minor = f32_ioctl.fp_minor;
1764 1764 fioctl.listlen = f32_ioctl.listlen;
1765 1765 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1766 1766 break;
1767 1767 }
1768 1768 case DDI_MODEL_NONE:
1769 1769 if (ddi_copyin((void *)data, (void *)&fioctl,
1770 1770 sizeof (struct fcp_ioctl), mode)) {
1771 1771 return (EFAULT);
1772 1772 }
1773 1773 break;
1774 1774 }
1775 1775
1776 1776 #else /* _MULTI_DATAMODEL */
1777 1777 if (ddi_copyin((void *)data, (void *)&fioctl,
1778 1778 sizeof (struct fcp_ioctl), mode)) {
1779 1779 return (EFAULT);
1780 1780 }
1781 1781 #endif /* _MULTI_DATAMODEL */
1782 1782
1783 1783 /*
1784 1784 * Right now we can assume that the minor number matches with
1785 1785 * this instance of fp. If this changes we will need to
1786 1786 * revisit this logic.
1787 1787 */
1788 1788 mutex_enter(&fcp_global_mutex);
1789 1789 pptr = fcp_port_head;
1790 1790 while (pptr) {
1791 1791 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1792 1792 break;
1793 1793 } else {
1794 1794 pptr = pptr->port_next;
1795 1795 }
1796 1796 }
1797 1797 mutex_exit(&fcp_global_mutex);
1798 1798 if (pptr == NULL) {
1799 1799 cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1800 1800 fioctl.fp_minor);
1801 1801 return (ENXIO);
1802 1802 }
1803 1803
1804 1804
1805 1805 /* We use listlen to show the total buffer size */
1806 1806 mappingSize = fioctl.listlen;
1807 1807
1808 1808 /* Now calculate how many mapping entries will fit */
1809 1809 listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1810 1810 - sizeof (fc_hba_target_mappings_t);
1811 1811 if (listlen <= 0) {
1812 1812 cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1813 1813 return (ENXIO);
1814 1814 }
1815 1815 listlen = listlen / sizeof (fc_hba_mapping_entry_t);
1816 1816
1817 1817 if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1818 1818 return (ENOMEM);
1819 1819 }
1820 1820 mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1821 1821
1822 1822 /* Now get to work */
1823 1823 mapIndex = 0;
1824 1824
1825 1825 mutex_enter(&pptr->port_mutex);
1826 1826 /* Loop through all targets on this port */
1827 1827 for (i = 0; i < FCP_NUM_HASH; i++) {
1828 1828 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1829 1829 ptgt = ptgt->tgt_next) {
1830 1830
1831 1831 mutex_enter(&ptgt->tgt_mutex);
1832 1832
1833 1833 /* Loop through all LUNs on this target */
1834 1834 for (plun = ptgt->tgt_lun; plun != NULL;
1835 1835 plun = plun->lun_next) {
1836 1836 if (plun->lun_state & FCP_LUN_OFFLINE) {
1837 1837 continue;
1838 1838 }
1839 1839
1840 1840 path = fcp_get_lun_path(plun);
1841 1841 if (path == NULL) {
1842 1842 continue;
1843 1843 }
1844 1844
1845 1845 if (mapIndex >= listlen) {
1846 1846 mapIndex ++;
1847 1847 kmem_free(path, MAXPATHLEN);
1848 1848 continue;
1849 1849 }
1850 1850 map = &mappings->entries[mapIndex++];
1851 1851 bcopy(path, map->targetDriver,
1852 1852 sizeof (map->targetDriver));
1853 1853 map->d_id = ptgt->tgt_d_id;
1854 1854 map->busNumber = 0;
1855 1855 map->targetNumber = ptgt->tgt_d_id;
1856 1856 map->osLUN = plun->lun_num;
1857 1857
1858 1858 /*
1859 1859 * We had swapped lun when we stored it in
1860 1860 * lun_addr. We need to swap it back before
1861 1861 * returning it to user land
1862 1862 */
1863 1863
1864 1864 sam_lun_addr.ent_addr_0 =
1865 1865 BE_16(plun->lun_addr.ent_addr_0);
1866 1866 sam_lun_addr.ent_addr_1 =
1867 1867 BE_16(plun->lun_addr.ent_addr_1);
1868 1868 sam_lun_addr.ent_addr_2 =
1869 1869 BE_16(plun->lun_addr.ent_addr_2);
1870 1870 sam_lun_addr.ent_addr_3 =
1871 1871 BE_16(plun->lun_addr.ent_addr_3);
1872 1872
1873 1873 bcopy(&sam_lun_addr, &map->samLUN,
1874 1874 FCP_LUN_SIZE);
1875 1875 bcopy(ptgt->tgt_node_wwn.raw_wwn,
1876 1876 map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1877 1877 bcopy(ptgt->tgt_port_wwn.raw_wwn,
1878 1878 map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1879 1879
1880 1880 if (plun->lun_guid) {
1881 1881
1882 1882 /* convert ascii wwn to bytes */
1883 1883 fcp_ascii_to_wwn(plun->lun_guid,
1884 1884 map->guid, sizeof (map->guid));
1885 1885
1886 1886 if ((sizeof (map->guid)) <
1887 1887 plun->lun_guid_size / 2) {
1888 1888 cmn_err(CE_WARN,
1889 1889 "fcp_get_target_mappings:"
1890 1890 "guid copy space "
1891 1891 "insufficient."
1892 1892 "Copy Truncation - "
1893 1893 "available %d; need %d",
1894 1894 (int)sizeof (map->guid),
1895 1895 (int)
1896 1896 plun->lun_guid_size / 2);
1897 1897 }
1898 1898 }
1899 1899 kmem_free(path, MAXPATHLEN);
1900 1900 }
1901 1901 mutex_exit(&ptgt->tgt_mutex);
1902 1902 }
1903 1903 }
1904 1904 mutex_exit(&pptr->port_mutex);
1905 1905 mappings->numLuns = mapIndex;
1906 1906
1907 1907 if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1908 1908 kmem_free(mappings, mappingSize);
1909 1909 return (EFAULT);
1910 1910 }
1911 1911 kmem_free(mappings, mappingSize);
1912 1912
1913 1913 #ifdef _MULTI_DATAMODEL
1914 1914 switch (ddi_model_convert_from(mode & FMODELS)) {
1915 1915 case DDI_MODEL_ILP32: {
1916 1916 struct fcp32_ioctl f32_ioctl;
1917 1917
1918 1918 f32_ioctl.fp_minor = fioctl.fp_minor;
1919 1919 f32_ioctl.listlen = fioctl.listlen;
1920 1920 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1921 1921 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1922 1922 sizeof (struct fcp32_ioctl), mode)) {
1923 1923 return (EFAULT);
1924 1924 }
1925 1925 break;
1926 1926 }
1927 1927 case DDI_MODEL_NONE:
1928 1928 if (ddi_copyout((void *)&fioctl, (void *)data,
1929 1929 sizeof (struct fcp_ioctl), mode)) {
1930 1930 return (EFAULT);
1931 1931 }
1932 1932 break;
1933 1933 }
1934 1934 #else /* _MULTI_DATAMODEL */
1935 1935
1936 1936 if (ddi_copyout((void *)&fioctl, (void *)data,
1937 1937 sizeof (struct fcp_ioctl), mode)) {
1938 1938 return (EFAULT);
1939 1939 }
1940 1940 #endif /* _MULTI_DATAMODEL */
1941 1941
1942 1942 return (0);
1943 1943 }
1944 1944
1945 1945 /*
1946 1946 * fcp_setup_scsi_ioctl
1947 1947 * Setup handler for the "scsi passthru" style of
1948 1948 * ioctl for FCP. See "fcp_util.h" for data structure
1949 1949 * definition.
1950 1950 *
1951 1951 * Input:
1952 1952 * u_fscsi = ioctl data (user address space)
1953 1953 * mode = See ioctl(9E)
1954 1954 *
1955 1955 * Output:
1956 1956 * u_fscsi = ioctl data (user address space)
1957 1957 * rval = return value - see ioctl(9E)
1958 1958 *
1959 1959 * Returns:
1960 1960 * 0 = OK
1961 1961 * EAGAIN = See errno.h
1962 1962 * EBUSY = See errno.h
1963 1963 * EFAULT = See errno.h
1964 1964 * EINTR = See errno.h
1965 1965 * EINVAL = See errno.h
1966 1966 * EIO = See errno.h
1967 1967 * ENOMEM = See errno.h
1968 1968 * ENXIO = See errno.h
1969 1969 *
1970 1970 * Context:
1971 1971 * Kernel context.
1972 1972 */
1973 1973 /* ARGSUSED */
1974 1974 static int
1975 1975 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1976 1976 int mode, int *rval)
1977 1977 {
1978 1978 int ret = 0;
1979 1979 int temp_ret;
1980 1980 caddr_t k_cdbbufaddr = NULL;
1981 1981 caddr_t k_bufaddr = NULL;
1982 1982 caddr_t k_rqbufaddr = NULL;
1983 1983 caddr_t u_cdbbufaddr;
1984 1984 caddr_t u_bufaddr;
1985 1985 caddr_t u_rqbufaddr;
1986 1986 struct fcp_scsi_cmd k_fscsi;
1987 1987
1988 1988 /*
1989 1989 * Get fcp_scsi_cmd array element from user address space
1990 1990 */
1991 1991 if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1992 1992 != 0) {
1993 1993 return (ret);
1994 1994 }
1995 1995
1996 1996
1997 1997 /*
1998 1998 * Even though kmem_alloc() checks the validity of the
1999 1999 * buffer length, this check is needed when the
2000 2000 * kmem_flags set and the zero buffer length is passed.
2001 2001 */
2002 2002 if ((k_fscsi.scsi_cdblen <= 0) ||
2003 2003 (k_fscsi.scsi_buflen <= 0) ||
2004 2004 (k_fscsi.scsi_rqlen <= 0)) {
2005 2005 return (EINVAL);
2006 2006 }
2007 2007
2008 2008 /*
2009 2009 * Allocate data for fcp_scsi_cmd pointer fields
2010 2010 */
2011 2011 if (ret == 0) {
2012 2012 k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
2013 2013 k_bufaddr = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
2014 2014 k_rqbufaddr = kmem_alloc(k_fscsi.scsi_rqlen, KM_NOSLEEP);
2015 2015
2016 2016 if (k_cdbbufaddr == NULL ||
2017 2017 k_bufaddr == NULL ||
2018 2018 k_rqbufaddr == NULL) {
2019 2019 ret = ENOMEM;
2020 2020 }
2021 2021 }
2022 2022
2023 2023 /*
2024 2024 * Get fcp_scsi_cmd pointer fields from user
2025 2025 * address space
2026 2026 */
2027 2027 if (ret == 0) {
2028 2028 u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
2029 2029 u_bufaddr = k_fscsi.scsi_bufaddr;
2030 2030 u_rqbufaddr = k_fscsi.scsi_rqbufaddr;
2031 2031
2032 2032 if (ddi_copyin(u_cdbbufaddr,
2033 2033 k_cdbbufaddr,
2034 2034 k_fscsi.scsi_cdblen,
2035 2035 mode)) {
2036 2036 ret = EFAULT;
2037 2037 } else if (ddi_copyin(u_bufaddr,
2038 2038 k_bufaddr,
2039 2039 k_fscsi.scsi_buflen,
2040 2040 mode)) {
2041 2041 ret = EFAULT;
2042 2042 } else if (ddi_copyin(u_rqbufaddr,
2043 2043 k_rqbufaddr,
2044 2044 k_fscsi.scsi_rqlen,
2045 2045 mode)) {
2046 2046 ret = EFAULT;
2047 2047 }
2048 2048 }
2049 2049
2050 2050 /*
2051 2051 * Send scsi command (blocking)
2052 2052 */
2053 2053 if (ret == 0) {
2054 2054 /*
2055 2055 * Prior to sending the scsi command, the
2056 2056 * fcp_scsi_cmd data structure must contain kernel,
2057 2057 * not user, addresses.
2058 2058 */
2059 2059 k_fscsi.scsi_cdbbufaddr = k_cdbbufaddr;
2060 2060 k_fscsi.scsi_bufaddr = k_bufaddr;
2061 2061 k_fscsi.scsi_rqbufaddr = k_rqbufaddr;
2062 2062
2063 2063 ret = fcp_send_scsi_ioctl(&k_fscsi);
2064 2064
2065 2065 /*
2066 2066 * After sending the scsi command, the
2067 2067 * fcp_scsi_cmd data structure must contain user,
2068 2068 * not kernel, addresses.
2069 2069 */
2070 2070 k_fscsi.scsi_cdbbufaddr = u_cdbbufaddr;
2071 2071 k_fscsi.scsi_bufaddr = u_bufaddr;
2072 2072 k_fscsi.scsi_rqbufaddr = u_rqbufaddr;
2073 2073 }
2074 2074
2075 2075 /*
2076 2076 * Put fcp_scsi_cmd pointer fields to user address space
2077 2077 */
2078 2078 if (ret == 0) {
2079 2079 if (ddi_copyout(k_cdbbufaddr,
2080 2080 u_cdbbufaddr,
2081 2081 k_fscsi.scsi_cdblen,
2082 2082 mode)) {
2083 2083 ret = EFAULT;
2084 2084 } else if (ddi_copyout(k_bufaddr,
2085 2085 u_bufaddr,
2086 2086 k_fscsi.scsi_buflen,
2087 2087 mode)) {
2088 2088 ret = EFAULT;
2089 2089 } else if (ddi_copyout(k_rqbufaddr,
2090 2090 u_rqbufaddr,
2091 2091 k_fscsi.scsi_rqlen,
2092 2092 mode)) {
2093 2093 ret = EFAULT;
2094 2094 }
2095 2095 }
2096 2096
2097 2097 /*
2098 2098 * Free data for fcp_scsi_cmd pointer fields
2099 2099 */
2100 2100 if (k_cdbbufaddr != NULL) {
2101 2101 kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2102 2102 }
2103 2103 if (k_bufaddr != NULL) {
2104 2104 kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2105 2105 }
2106 2106 if (k_rqbufaddr != NULL) {
2107 2107 kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2108 2108 }
2109 2109
2110 2110 /*
2111 2111 * Put fcp_scsi_cmd array element to user address space
2112 2112 */
2113 2113 temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2114 2114 if (temp_ret != 0) {
2115 2115 ret = temp_ret;
2116 2116 }
2117 2117
2118 2118 /*
2119 2119 * Return status
2120 2120 */
2121 2121 return (ret);
2122 2122 }
2123 2123
2124 2124
2125 2125 /*
2126 2126 * fcp_copyin_scsi_cmd
2127 2127 * Copy in fcp_scsi_cmd data structure from user address space.
2128 2128 * The data may be in 32 bit or 64 bit modes.
2129 2129 *
2130 2130 * Input:
2131 2131 * base_addr = from address (user address space)
2132 2132 * mode = See ioctl(9E) and ddi_copyin(9F)
2133 2133 *
2134 2134 * Output:
2135 2135 * fscsi = to address (kernel address space)
2136 2136 *
2137 2137 * Returns:
2138 2138 * 0 = OK
2139 2139 * EFAULT = Error
2140 2140 *
2141 2141 * Context:
2142 2142 * Kernel context.
2143 2143 */
2144 2144 static int
2145 2145 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2146 2146 {
2147 2147 #ifdef _MULTI_DATAMODEL
2148 2148 struct fcp32_scsi_cmd f32scsi;
2149 2149
2150 2150 switch (ddi_model_convert_from(mode & FMODELS)) {
2151 2151 case DDI_MODEL_ILP32:
2152 2152 /*
2153 2153 * Copy data from user address space
2154 2154 */
2155 2155 if (ddi_copyin((void *)base_addr,
2156 2156 &f32scsi,
2157 2157 sizeof (struct fcp32_scsi_cmd),
2158 2158 mode)) {
2159 2159 return (EFAULT);
2160 2160 }
2161 2161 /*
2162 2162 * Convert from 32 bit to 64 bit
2163 2163 */
2164 2164 FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2165 2165 break;
2166 2166 case DDI_MODEL_NONE:
2167 2167 /*
2168 2168 * Copy data from user address space
2169 2169 */
2170 2170 if (ddi_copyin((void *)base_addr,
2171 2171 fscsi,
2172 2172 sizeof (struct fcp_scsi_cmd),
2173 2173 mode)) {
2174 2174 return (EFAULT);
2175 2175 }
2176 2176 break;
2177 2177 }
2178 2178 #else /* _MULTI_DATAMODEL */
2179 2179 /*
2180 2180 * Copy data from user address space
2181 2181 */
2182 2182 if (ddi_copyin((void *)base_addr,
2183 2183 fscsi,
2184 2184 sizeof (struct fcp_scsi_cmd),
2185 2185 mode)) {
2186 2186 return (EFAULT);
2187 2187 }
2188 2188 #endif /* _MULTI_DATAMODEL */
2189 2189
2190 2190 return (0);
2191 2191 }
2192 2192
2193 2193
2194 2194 /*
2195 2195 * fcp_copyout_scsi_cmd
2196 2196 * Copy out fcp_scsi_cmd data structure to user address space.
2197 2197 * The data may be in 32 bit or 64 bit modes.
2198 2198 *
2199 2199 * Input:
2200 2200 * fscsi = to address (kernel address space)
2201 2201 * mode = See ioctl(9E) and ddi_copyin(9F)
2202 2202 *
2203 2203 * Output:
2204 2204 * base_addr = from address (user address space)
2205 2205 *
2206 2206 * Returns:
2207 2207 * 0 = OK
2208 2208 * EFAULT = Error
2209 2209 *
2210 2210 * Context:
2211 2211 * Kernel context.
2212 2212 */
2213 2213 static int
2214 2214 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2215 2215 {
2216 2216 #ifdef _MULTI_DATAMODEL
2217 2217 struct fcp32_scsi_cmd f32scsi;
2218 2218
2219 2219 switch (ddi_model_convert_from(mode & FMODELS)) {
2220 2220 case DDI_MODEL_ILP32:
2221 2221 /*
2222 2222 * Convert from 64 bit to 32 bit
2223 2223 */
2224 2224 FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2225 2225 /*
2226 2226 * Copy data to user address space
2227 2227 */
2228 2228 if (ddi_copyout(&f32scsi,
2229 2229 (void *)base_addr,
2230 2230 sizeof (struct fcp32_scsi_cmd),
2231 2231 mode)) {
2232 2232 return (EFAULT);
2233 2233 }
2234 2234 break;
2235 2235 case DDI_MODEL_NONE:
2236 2236 /*
2237 2237 * Copy data to user address space
2238 2238 */
2239 2239 if (ddi_copyout(fscsi,
2240 2240 (void *)base_addr,
2241 2241 sizeof (struct fcp_scsi_cmd),
2242 2242 mode)) {
2243 2243 return (EFAULT);
2244 2244 }
2245 2245 break;
2246 2246 }
2247 2247 #else /* _MULTI_DATAMODEL */
2248 2248 /*
2249 2249 * Copy data to user address space
2250 2250 */
2251 2251 if (ddi_copyout(fscsi,
2252 2252 (void *)base_addr,
2253 2253 sizeof (struct fcp_scsi_cmd),
2254 2254 mode)) {
2255 2255 return (EFAULT);
2256 2256 }
2257 2257 #endif /* _MULTI_DATAMODEL */
2258 2258
2259 2259 return (0);
2260 2260 }
2261 2261
2262 2262
2263 2263 /*
2264 2264 * fcp_send_scsi_ioctl
2265 2265 * Sends the SCSI command in blocking mode.
2266 2266 *
2267 2267 * Input:
2268 2268 * fscsi = SCSI command data structure
2269 2269 *
2270 2270 * Output:
2271 2271 * fscsi = SCSI command data structure
2272 2272 *
2273 2273 * Returns:
2274 2274 * 0 = OK
2275 2275 * EAGAIN = See errno.h
2276 2276 * EBUSY = See errno.h
2277 2277 * EINTR = See errno.h
2278 2278 * EINVAL = See errno.h
2279 2279 * EIO = See errno.h
2280 2280 * ENOMEM = See errno.h
2281 2281 * ENXIO = See errno.h
2282 2282 *
2283 2283 * Context:
2284 2284 * Kernel context.
2285 2285 */
2286 2286 static int
2287 2287 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2288 2288 {
2289 2289 struct fcp_lun *plun = NULL;
2290 2290 struct fcp_port *pptr = NULL;
2291 2291 struct fcp_tgt *ptgt = NULL;
2292 2292 fc_packet_t *fpkt = NULL;
2293 2293 struct fcp_ipkt *icmd = NULL;
2294 2294 int target_created = FALSE;
2295 2295 fc_frame_hdr_t *hp;
2296 2296 struct fcp_cmd fcp_cmd;
2297 2297 struct fcp_cmd *fcmd;
2298 2298 union scsi_cdb *scsi_cdb;
2299 2299 la_wwn_t *wwn_ptr;
2300 2300 int nodma;
2301 2301 struct fcp_rsp *rsp;
2302 2302 struct fcp_rsp_info *rsp_info;
2303 2303 caddr_t rsp_sense;
2304 2304 int buf_len;
2305 2305 int info_len;
2306 2306 int sense_len;
2307 2307 struct scsi_extended_sense *sense_to = NULL;
2308 2308 timeout_id_t tid;
2309 2309 uint8_t reconfig_lun = FALSE;
2310 2310 uint8_t reconfig_pending = FALSE;
2311 2311 uint8_t scsi_cmd;
2312 2312 int rsp_len;
2313 2313 int cmd_index;
2314 2314 int fc_status;
2315 2315 int pkt_state;
2316 2316 int pkt_action;
2317 2317 int pkt_reason;
2318 2318 int ret, xport_retval = ~FC_SUCCESS;
2319 2319 int lcount;
2320 2320 int tcount;
2321 2321 int reconfig_status;
2322 2322 int port_busy = FALSE;
2323 2323 uchar_t *lun_string;
2324 2324
2325 2325 /*
2326 2326 * Check valid SCSI command
2327 2327 */
2328 2328 scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2329 2329 ret = EINVAL;
2330 2330 for (cmd_index = 0;
2331 2331 cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2332 2332 ret != 0;
2333 2333 cmd_index++) {
2334 2334 /*
2335 2335 * First byte of CDB is the SCSI command
2336 2336 */
2337 2337 if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2338 2338 ret = 0;
2339 2339 }
2340 2340 }
2341 2341
2342 2342 /*
2343 2343 * Check inputs
2344 2344 */
2345 2345 if (fscsi->scsi_flags != FCP_SCSI_READ) {
2346 2346 ret = EINVAL;
2347 2347 } else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2348 2348 /* no larger than */
2349 2349 ret = EINVAL;
2350 2350 }
2351 2351
2352 2352
2353 2353 /*
2354 2354 * Find FC port
2355 2355 */
2356 2356 if (ret == 0) {
2357 2357 /*
2358 2358 * Acquire global mutex
2359 2359 */
2360 2360 mutex_enter(&fcp_global_mutex);
2361 2361
2362 2362 pptr = fcp_port_head;
2363 2363 while (pptr) {
2364 2364 if (pptr->port_instance ==
2365 2365 (uint32_t)fscsi->scsi_fc_port_num) {
2366 2366 break;
2367 2367 } else {
2368 2368 pptr = pptr->port_next;
2369 2369 }
2370 2370 }
2371 2371
2372 2372 if (pptr == NULL) {
2373 2373 ret = ENXIO;
2374 2374 } else {
2375 2375 /*
2376 2376 * fc_ulp_busy_port can raise power
2377 2377 * so, we must not hold any mutexes involved in PM
2378 2378 */
2379 2379 mutex_exit(&fcp_global_mutex);
2380 2380 ret = fc_ulp_busy_port(pptr->port_fp_handle);
2381 2381 }
2382 2382
2383 2383 if (ret == 0) {
2384 2384
2385 2385 /* remember port is busy, so we will release later */
2386 2386 port_busy = TRUE;
2387 2387
2388 2388 /*
2389 2389 * If there is a reconfiguration in progress, wait
2390 2390 * for it to complete.
2391 2391 */
2392 2392
2393 2393 fcp_reconfig_wait(pptr);
2394 2394
2395 2395 /* reacquire mutexes in order */
2396 2396 mutex_enter(&fcp_global_mutex);
2397 2397 mutex_enter(&pptr->port_mutex);
2398 2398
2399 2399 /*
2400 2400 * Will port accept DMA?
2401 2401 */
2402 2402 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2403 2403 ? 1 : 0;
2404 2404
2405 2405 /*
2406 2406 * If init or offline, device not known
2407 2407 *
2408 2408 * If we are discovering (onlining), we can
2409 2409 * NOT obviously provide reliable data about
2410 2410 * devices until it is complete
2411 2411 */
2412 2412 if (pptr->port_state & (FCP_STATE_INIT |
2413 2413 FCP_STATE_OFFLINE)) {
2414 2414 ret = ENXIO;
2415 2415 } else if (pptr->port_state & FCP_STATE_ONLINING) {
2416 2416 ret = EBUSY;
2417 2417 } else {
2418 2418 /*
2419 2419 * Find target from pwwn
2420 2420 *
2421 2421 * The wwn must be put into a local
2422 2422 * variable to ensure alignment.
2423 2423 */
2424 2424 wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2425 2425 ptgt = fcp_lookup_target(pptr,
2426 2426 (uchar_t *)wwn_ptr);
2427 2427
2428 2428 /*
2429 2429 * If target not found,
2430 2430 */
2431 2431 if (ptgt == NULL) {
2432 2432 /*
2433 2433 * Note: Still have global &
2434 2434 * port mutexes
2435 2435 */
2436 2436 mutex_exit(&pptr->port_mutex);
2437 2437 ptgt = fcp_port_create_tgt(pptr,
2438 2438 wwn_ptr, &ret, &fc_status,
2439 2439 &pkt_state, &pkt_action,
2440 2440 &pkt_reason);
2441 2441 mutex_enter(&pptr->port_mutex);
2442 2442
2443 2443 fscsi->scsi_fc_status = fc_status;
2444 2444 fscsi->scsi_pkt_state =
2445 2445 (uchar_t)pkt_state;
2446 2446 fscsi->scsi_pkt_reason = pkt_reason;
2447 2447 fscsi->scsi_pkt_action =
2448 2448 (uchar_t)pkt_action;
2449 2449
2450 2450 if (ptgt != NULL) {
2451 2451 target_created = TRUE;
2452 2452 } else if (ret == 0) {
2453 2453 ret = ENOMEM;
2454 2454 }
2455 2455 }
2456 2456
2457 2457 if (ret == 0) {
2458 2458 /*
2459 2459 * Acquire target
2460 2460 */
2461 2461 mutex_enter(&ptgt->tgt_mutex);
2462 2462
2463 2463 /*
2464 2464 * If target is mark or busy,
2465 2465 * then target can not be used
2466 2466 */
2467 2467 if (ptgt->tgt_state &
2468 2468 (FCP_TGT_MARK |
2469 2469 FCP_TGT_BUSY)) {
2470 2470 ret = EBUSY;
2471 2471 } else {
2472 2472 /*
2473 2473 * Mark target as busy
2474 2474 */
2475 2475 ptgt->tgt_state |=
2476 2476 FCP_TGT_BUSY;
2477 2477 }
2478 2478
2479 2479 /*
2480 2480 * Release target
2481 2481 */
2482 2482 lcount = pptr->port_link_cnt;
2483 2483 tcount = ptgt->tgt_change_cnt;
2484 2484 mutex_exit(&ptgt->tgt_mutex);
2485 2485 }
2486 2486 }
2487 2487
2488 2488 /*
2489 2489 * Release port
2490 2490 */
2491 2491 mutex_exit(&pptr->port_mutex);
2492 2492 }
2493 2493
2494 2494 /*
2495 2495 * Release global mutex
2496 2496 */
2497 2497 mutex_exit(&fcp_global_mutex);
2498 2498 }
2499 2499
2500 2500 if (ret == 0) {
2501 2501 uint64_t belun = BE_64(fscsi->scsi_lun);
2502 2502
2503 2503 /*
2504 2504 * If it's a target device, find lun from pwwn
2505 2505 * The wwn must be put into a local
2506 2506 * variable to ensure alignment.
2507 2507 */
2508 2508 mutex_enter(&pptr->port_mutex);
2509 2509 wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2510 2510 if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2511 2511 /* this is not a target */
2512 2512 fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2513 2513 ret = ENXIO;
2514 2514 } else if ((belun << 16) != 0) {
2515 2515 /*
2516 2516 * Since fcp only support PD and LU addressing method
2517 2517 * so far, the last 6 bytes of a valid LUN are expected
2518 2518 * to be filled with 00h.
2519 2519 */
2520 2520 fscsi->scsi_fc_status = FC_INVALID_LUN;
2521 2521 cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2522 2522 " method 0x%02x with LUN number 0x%016" PRIx64,
2523 2523 (uint8_t)(belun >> 62), belun);
2524 2524 ret = ENXIO;
2525 2525 } else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2526 2526 (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2527 2527 /*
2528 2528 * This is a SCSI target, but no LUN at this
2529 2529 * address.
2530 2530 *
2531 2531 * In the future, we may want to send this to
2532 2532 * the target, and let it respond
2533 2533 * appropriately
2534 2534 */
2535 2535 ret = ENXIO;
2536 2536 }
2537 2537 mutex_exit(&pptr->port_mutex);
2538 2538 }
2539 2539
2540 2540 /*
2541 2541 * Finished grabbing external resources
2542 2542 * Allocate internal packet (icmd)
2543 2543 */
2544 2544 if (ret == 0) {
2545 2545 /*
2546 2546 * Calc rsp len assuming rsp info included
2547 2547 */
2548 2548 rsp_len = sizeof (struct fcp_rsp) +
2549 2549 sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2550 2550
2551 2551 icmd = fcp_icmd_alloc(pptr, ptgt,
2552 2552 sizeof (struct fcp_cmd),
2553 2553 rsp_len,
2554 2554 fscsi->scsi_buflen,
2555 2555 nodma,
2556 2556 lcount, /* ipkt_link_cnt */
2557 2557 tcount, /* ipkt_change_cnt */
2558 2558 0, /* cause */
2559 2559 FC_INVALID_RSCN_COUNT); /* invalidate the count */
2560 2560
2561 2561 if (icmd == NULL) {
2562 2562 ret = ENOMEM;
2563 2563 } else {
2564 2564 /*
2565 2565 * Setup internal packet as sema sync
2566 2566 */
2567 2567 fcp_ipkt_sema_init(icmd);
2568 2568 }
2569 2569 }
2570 2570
2571 2571 if (ret == 0) {
2572 2572 /*
2573 2573 * Init fpkt pointer for use.
2574 2574 */
2575 2575
2576 2576 fpkt = icmd->ipkt_fpkt;
2577 2577
2578 2578 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
2579 2579 fpkt->pkt_tran_type = FC_PKT_FCP_READ; /* only rd for now */
2580 2580 fpkt->pkt_timeout = fscsi->scsi_timeout;
2581 2581
2582 2582 /*
2583 2583 * Init fcmd pointer for use by SCSI command
2584 2584 */
2585 2585
2586 2586 if (nodma) {
2587 2587 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2588 2588 } else {
2589 2589 fcmd = &fcp_cmd;
2590 2590 }
2591 2591 bzero(fcmd, sizeof (struct fcp_cmd));
2592 2592 ptgt = plun->lun_tgt;
2593 2593
2594 2594 lun_string = (uchar_t *)&fscsi->scsi_lun;
2595 2595
2596 2596 fcmd->fcp_ent_addr.ent_addr_0 =
2597 2597 BE_16(*(uint16_t *)&(lun_string[0]));
2598 2598 fcmd->fcp_ent_addr.ent_addr_1 =
2599 2599 BE_16(*(uint16_t *)&(lun_string[2]));
2600 2600 fcmd->fcp_ent_addr.ent_addr_2 =
2601 2601 BE_16(*(uint16_t *)&(lun_string[4]));
2602 2602 fcmd->fcp_ent_addr.ent_addr_3 =
2603 2603 BE_16(*(uint16_t *)&(lun_string[6]));
2604 2604
2605 2605 /*
2606 2606 * Setup internal packet(icmd)
2607 2607 */
2608 2608 icmd->ipkt_lun = plun;
2609 2609 icmd->ipkt_restart = 0;
2610 2610 icmd->ipkt_retries = 0;
2611 2611 icmd->ipkt_opcode = 0;
2612 2612
2613 2613 /*
2614 2614 * Init the frame HEADER Pointer for use
2615 2615 */
2616 2616 hp = &fpkt->pkt_cmd_fhdr;
2617 2617
2618 2618 hp->s_id = pptr->port_id;
2619 2619 hp->d_id = ptgt->tgt_d_id;
2620 2620 hp->r_ctl = R_CTL_COMMAND;
2621 2621 hp->type = FC_TYPE_SCSI_FCP;
2622 2622 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2623 2623 hp->rsvd = 0;
2624 2624 hp->seq_id = 0;
2625 2625 hp->seq_cnt = 0;
2626 2626 hp->ox_id = 0xffff;
2627 2627 hp->rx_id = 0xffff;
2628 2628 hp->ro = 0;
2629 2629
2630 2630 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
2631 2631 fcmd->fcp_cntl.cntl_read_data = 1; /* only rd for now */
2632 2632 fcmd->fcp_cntl.cntl_write_data = 0;
2633 2633 fcmd->fcp_data_len = fscsi->scsi_buflen;
2634 2634
2635 2635 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2636 2636 bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2637 2637 fscsi->scsi_cdblen);
2638 2638
2639 2639 if (!nodma) {
2640 2640 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2641 2641 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2642 2642 }
2643 2643
2644 2644 /*
2645 2645 * Send SCSI command to FC transport
2646 2646 */
2647 2647
2648 2648 if (ret == 0) {
2649 2649 mutex_enter(&ptgt->tgt_mutex);
2650 2650
2651 2651 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2652 2652 mutex_exit(&ptgt->tgt_mutex);
2653 2653 fscsi->scsi_fc_status = xport_retval =
2654 2654 fc_ulp_transport(pptr->port_fp_handle,
2655 2655 fpkt);
2656 2656 if (fscsi->scsi_fc_status != FC_SUCCESS) {
2657 2657 ret = EIO;
2658 2658 }
2659 2659 } else {
2660 2660 mutex_exit(&ptgt->tgt_mutex);
2661 2661 ret = EBUSY;
2662 2662 }
2663 2663 }
2664 2664 }
2665 2665
2666 2666 /*
2667 2667 * Wait for completion only if fc_ulp_transport was called and it
2668 2668 * returned a success. This is the only time callback will happen.
2669 2669 * Otherwise, there is no point in waiting
2670 2670 */
2671 2671 if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2672 2672 ret = fcp_ipkt_sema_wait(icmd);
2673 2673 }
2674 2674
2675 2675 /*
2676 2676 * Copy data to IOCTL data structures
2677 2677 */
2678 2678 rsp = NULL;
2679 2679 if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2680 2680 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2681 2681
2682 2682 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2683 2683 fcp_log(CE_WARN, pptr->port_dip,
2684 2684 "!SCSI command to d_id=0x%x lun=0x%x"
2685 2685 " failed, Bad FCP response values:"
2686 2686 " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2687 2687 " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2688 2688 ptgt->tgt_d_id, plun->lun_num,
2689 2689 rsp->reserved_0, rsp->reserved_1,
2690 2690 rsp->fcp_u.fcp_status.reserved_0,
2691 2691 rsp->fcp_u.fcp_status.reserved_1,
2692 2692 rsp->fcp_response_len, rsp->fcp_sense_len);
2693 2693
2694 2694 ret = EIO;
2695 2695 }
2696 2696 }
2697 2697
2698 2698 if ((ret == 0) && (rsp != NULL)) {
2699 2699 /*
2700 2700 * Calc response lengths
2701 2701 */
2702 2702 sense_len = 0;
2703 2703 info_len = 0;
2704 2704
2705 2705 if (rsp->fcp_u.fcp_status.rsp_len_set) {
2706 2706 info_len = rsp->fcp_response_len;
2707 2707 }
2708 2708
2709 2709 rsp_info = (struct fcp_rsp_info *)
2710 2710 ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2711 2711
2712 2712 /*
2713 2713 * Get SCSI status
2714 2714 */
2715 2715 fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2716 2716 /*
2717 2717 * If a lun was just added or removed and the next command
2718 2718 * comes through this interface, we need to capture the check
2719 2719 * condition so we can discover the new topology.
2720 2720 */
2721 2721 if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2722 2722 rsp->fcp_u.fcp_status.sense_len_set) {
2723 2723 sense_len = rsp->fcp_sense_len;
2724 2724 rsp_sense = (caddr_t)((uint8_t *)rsp_info + info_len);
2725 2725 sense_to = (struct scsi_extended_sense *)rsp_sense;
2726 2726 if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2727 2727 (FCP_SENSE_NO_LUN(sense_to))) {
2728 2728 reconfig_lun = TRUE;
2729 2729 }
2730 2730 }
2731 2731
2732 2732 if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2733 2733 (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2734 2734 if (reconfig_lun == FALSE) {
2735 2735 reconfig_status =
2736 2736 fcp_is_reconfig_needed(ptgt, fpkt);
2737 2737 }
2738 2738
2739 2739 if ((reconfig_lun == TRUE) ||
2740 2740 (reconfig_status == TRUE)) {
2741 2741 mutex_enter(&ptgt->tgt_mutex);
2742 2742 if (ptgt->tgt_tid == NULL) {
2743 2743 /*
2744 2744 * Either we've been notified the
2745 2745 * REPORT_LUN data has changed, or
2746 2746 * we've determined on our own that
2747 2747 * we're out of date. Kick off
2748 2748 * rediscovery.
2749 2749 */
2750 2750 tid = timeout(fcp_reconfigure_luns,
2751 2751 (caddr_t)ptgt, drv_usectohz(1));
2752 2752
2753 2753 ptgt->tgt_tid = tid;
2754 2754 ptgt->tgt_state |= FCP_TGT_BUSY;
2755 2755 ret = EBUSY;
2756 2756 reconfig_pending = TRUE;
2757 2757 }
2758 2758 mutex_exit(&ptgt->tgt_mutex);
2759 2759 }
2760 2760 }
2761 2761
2762 2762 /*
2763 2763 * Calc residuals and buffer lengths
2764 2764 */
2765 2765
2766 2766 if (ret == 0) {
2767 2767 buf_len = fscsi->scsi_buflen;
2768 2768 fscsi->scsi_bufresid = 0;
2769 2769 if (rsp->fcp_u.fcp_status.resid_under) {
2770 2770 if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2771 2771 fscsi->scsi_bufresid = rsp->fcp_resid;
2772 2772 } else {
2773 2773 cmn_err(CE_WARN, "fcp: bad residue %x "
2774 2774 "for txfer len %x", rsp->fcp_resid,
2775 2775 fscsi->scsi_buflen);
2776 2776 fscsi->scsi_bufresid =
2777 2777 fscsi->scsi_buflen;
2778 2778 }
2779 2779 buf_len -= fscsi->scsi_bufresid;
2780 2780 }
2781 2781 if (rsp->fcp_u.fcp_status.resid_over) {
2782 2782 fscsi->scsi_bufresid = -rsp->fcp_resid;
2783 2783 }
2784 2784
2785 2785 fscsi->scsi_rqresid = fscsi->scsi_rqlen - sense_len;
2786 2786 if (fscsi->scsi_rqlen < sense_len) {
2787 2787 sense_len = fscsi->scsi_rqlen;
2788 2788 }
2789 2789
2790 2790 fscsi->scsi_fc_rspcode = 0;
2791 2791 if (rsp->fcp_u.fcp_status.rsp_len_set) {
2792 2792 fscsi->scsi_fc_rspcode = rsp_info->rsp_code;
2793 2793 }
2794 2794 fscsi->scsi_pkt_state = fpkt->pkt_state;
2795 2795 fscsi->scsi_pkt_action = fpkt->pkt_action;
2796 2796 fscsi->scsi_pkt_reason = fpkt->pkt_reason;
2797 2797
2798 2798 /*
2799 2799 * Copy data and request sense
2800 2800 *
2801 2801 * Data must be copied by using the FCP_CP_IN macro.
2802 2802 * This will ensure the proper byte order since the data
2803 2803 * is being copied directly from the memory mapped
2804 2804 * device register.
2805 2805 *
2806 2806 * The response (and request sense) will be in the
2807 2807 * correct byte order. No special copy is necessary.
2808 2808 */
2809 2809
2810 2810 if (buf_len) {
2811 2811 FCP_CP_IN(fpkt->pkt_data,
2812 2812 fscsi->scsi_bufaddr,
2813 2813 fpkt->pkt_data_acc,
2814 2814 buf_len);
2815 2815 }
2816 2816 bcopy((void *)rsp_sense,
2817 2817 (void *)fscsi->scsi_rqbufaddr,
2818 2818 sense_len);
2819 2819 }
2820 2820 }
2821 2821
2822 2822 /*
2823 2823 * Cleanup transport data structures if icmd was alloc-ed
2824 2824 * So, cleanup happens in the same thread that icmd was alloc-ed
2825 2825 */
2826 2826 if (icmd != NULL) {
2827 2827 fcp_ipkt_sema_cleanup(icmd);
2828 2828 }
2829 2829
2830 2830 /* restore pm busy/idle status */
2831 2831 if (port_busy) {
2832 2832 fc_ulp_idle_port(pptr->port_fp_handle);
2833 2833 }
2834 2834
2835 2835 /*
2836 2836 * Cleanup target. if a reconfig is pending, don't clear the BUSY
2837 2837 * flag, it'll be cleared when the reconfig is complete.
2838 2838 */
2839 2839 if ((ptgt != NULL) && !reconfig_pending) {
2840 2840 /*
2841 2841 * If target was created,
2842 2842 */
2843 2843 if (target_created) {
2844 2844 mutex_enter(&ptgt->tgt_mutex);
2845 2845 ptgt->tgt_state &= ~FCP_TGT_BUSY;
2846 2846 mutex_exit(&ptgt->tgt_mutex);
2847 2847 } else {
2848 2848 /*
2849 2849 * De-mark target as busy
2850 2850 */
2851 2851 mutex_enter(&ptgt->tgt_mutex);
2852 2852 ptgt->tgt_state &= ~FCP_TGT_BUSY;
2853 2853 mutex_exit(&ptgt->tgt_mutex);
2854 2854 }
2855 2855 }
2856 2856 return (ret);
2857 2857 }
2858 2858
2859 2859
2860 2860 static int
2861 2861 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2862 2862 fc_packet_t *fpkt)
2863 2863 {
2864 2864 uchar_t *lun_string;
2865 2865 uint16_t lun_num, i;
2866 2866 int num_luns;
2867 2867 int actual_luns;
2868 2868 int num_masked_luns;
2869 2869 int lun_buflen;
2870 2870 struct fcp_lun *plun = NULL;
2871 2871 struct fcp_reportlun_resp *report_lun;
2872 2872 uint8_t reconfig_needed = FALSE;
2873 2873 uint8_t lun_exists = FALSE;
2874 2874 fcp_port_t *pptr = ptgt->tgt_port;
2875 2875
2876 2876 report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2877 2877
2878 2878 FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2879 2879 fpkt->pkt_datalen);
2880 2880
2881 2881 /* get number of luns (which is supplied as LUNS * 8) */
2882 2882 num_luns = BE_32(report_lun->num_lun) >> 3;
2883 2883
2884 2884 /*
2885 2885 * Figure out exactly how many lun strings our response buffer
2886 2886 * can hold.
2887 2887 */
2888 2888 lun_buflen = (fpkt->pkt_datalen -
2889 2889 2 * sizeof (uint32_t)) / sizeof (longlong_t);
2890 2890
2891 2891 /*
2892 2892 * Is our response buffer full or not? We don't want to
2893 2893 * potentially walk beyond the number of luns we have.
2894 2894 */
2895 2895 if (num_luns <= lun_buflen) {
2896 2896 actual_luns = num_luns;
2897 2897 } else {
2898 2898 actual_luns = lun_buflen;
2899 2899 }
2900 2900
2901 2901 mutex_enter(&ptgt->tgt_mutex);
2902 2902
2903 2903 /* Scan each lun to see if we have masked it. */
2904 2904 num_masked_luns = 0;
2905 2905 if (fcp_lun_blacklist != NULL) {
2906 2906 for (i = 0; i < actual_luns; i++) {
2907 2907 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2908 2908 switch (lun_string[0] & 0xC0) {
2909 2909 case FCP_LUN_ADDRESSING:
2910 2910 case FCP_PD_ADDRESSING:
2911 2911 case FCP_VOLUME_ADDRESSING:
2912 2912 lun_num = ((lun_string[0] & 0x3F) << 8)
2913 2913 | lun_string[1];
2914 2914 if (fcp_should_mask(&ptgt->tgt_port_wwn,
2915 2915 lun_num) == TRUE) {
2916 2916 num_masked_luns++;
2917 2917 }
2918 2918 break;
2919 2919 default:
2920 2920 break;
2921 2921 }
2922 2922 }
2923 2923 }
2924 2924
2925 2925 /*
2926 2926 * The quick and easy check. If the number of LUNs reported
2927 2927 * doesn't match the number we currently know about, we need
2928 2928 * to reconfigure.
2929 2929 */
2930 2930 if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2931 2931 mutex_exit(&ptgt->tgt_mutex);
2932 2932 kmem_free(report_lun, fpkt->pkt_datalen);
2933 2933 return (TRUE);
2934 2934 }
2935 2935
2936 2936 /*
2937 2937 * If the quick and easy check doesn't turn up anything, we walk
2938 2938 * the list of luns from the REPORT_LUN response and look for
2939 2939 * any luns we don't know about. If we find one, we know we need
2940 2940 * to reconfigure. We will skip LUNs that are masked because of the
2941 2941 * blacklist.
2942 2942 */
2943 2943 for (i = 0; i < actual_luns; i++) {
2944 2944 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2945 2945 lun_exists = FALSE;
2946 2946 switch (lun_string[0] & 0xC0) {
2947 2947 case FCP_LUN_ADDRESSING:
2948 2948 case FCP_PD_ADDRESSING:
2949 2949 case FCP_VOLUME_ADDRESSING:
2950 2950 lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2951 2951
2952 2952 if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2953 2953 &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2954 2954 lun_exists = TRUE;
2955 2955 break;
2956 2956 }
2957 2957
2958 2958 for (plun = ptgt->tgt_lun; plun;
2959 2959 plun = plun->lun_next) {
2960 2960 if (plun->lun_num == lun_num) {
2961 2961 lun_exists = TRUE;
2962 2962 break;
2963 2963 }
2964 2964 }
2965 2965 break;
2966 2966 default:
2967 2967 break;
2968 2968 }
2969 2969
2970 2970 if (lun_exists == FALSE) {
2971 2971 reconfig_needed = TRUE;
2972 2972 break;
2973 2973 }
2974 2974 }
2975 2975
2976 2976 mutex_exit(&ptgt->tgt_mutex);
2977 2977 kmem_free(report_lun, fpkt->pkt_datalen);
2978 2978
2979 2979 return (reconfig_needed);
2980 2980 }
2981 2981
2982 2982 /*
2983 2983 * This function is called by fcp_handle_page83 and uses inquiry response data
2984 2984 * stored in plun->lun_inq to determine whether or not a device is a member of
 * the table fcp_symmetric_disk_table. We return 0 if it is in the table,
2986 2986 * otherwise 1.
2987 2987 */
2988 2988 static int
2989 2989 fcp_symmetric_device_probe(struct fcp_lun *plun)
2990 2990 {
2991 2991 struct scsi_inquiry *stdinq = &plun->lun_inq;
2992 2992 char *devidptr;
2993 2993 int i, len;
2994 2994
2995 2995 for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2996 2996 devidptr = fcp_symmetric_disk_table[i];
2997 2997 len = (int)strlen(devidptr);
2998 2998
2999 2999 if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
3000 3000 return (0);
3001 3001 }
3002 3002 }
3003 3003 return (1);
3004 3004 }
3005 3005
3006 3006
3007 3007 /*
3008 3008 * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
3009 3009 * It basically returns the current count of # of state change callbacks
 * i.e. the value of port_link_cnt.
3011 3011 *
3012 3012 * INPUT:
3013 3013 * fcp_ioctl.fp_minor -> The minor # of the fp port
3014 3014 * fcp_ioctl.listlen -> 1
3015 3015 * fcp_ioctl.list -> Pointer to a 32 bit integer
3016 3016 */
3017 3017 /*ARGSUSED2*/
static int
fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
{
	int			ret;
	uint32_t		link_cnt;
	struct fcp_ioctl	fioctl;
	struct fcp_port		*pptr = NULL;

	/* Copy the request in from user land; also looks up the fcp port. */
	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
	    &pptr)) != 0) {
		return (ret);
	}

	ASSERT(pptr != NULL);

	/* The caller must supply exactly one 32-bit slot (see above). */
	if (fioctl.listlen != 1) {
		return (EINVAL);
	}

	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_OFFLINE) {
		mutex_exit(&pptr->port_mutex);
		return (ENXIO);
	}

	/*
	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
	 * When the fcp initially attaches to the port and there are nothing
	 * hanging out of the port or if there was a repeat offline state change
	 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
	 * In the latter case, port_tmp_cnt will be non-zero and that is how we
	 * will differentiate the 2 cases.
	 */
	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
		mutex_exit(&pptr->port_mutex);
		return (ENXIO);
	}

	/* Snapshot the link state-change count under the port mutex. */
	link_cnt = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
		return (EFAULT);
	}

#ifdef _MULTI_DATAMODEL
	/* Copy the fcp_ioctl back out in the caller's data model. */
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		/* 32-bit app on a 64-bit kernel: narrow to the ILP32 layout. */
		f32_ioctl.fp_minor = fioctl.fp_minor;
		f32_ioctl.listlen = fioctl.listlen;
		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyout((void *)&fioctl, (void *)data,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
#else /* _MULTI_DATAMODEL */

	if (ddi_copyout((void *)&fioctl, (void *)data,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	return (0);
}
3094 3094
3095 3095 /*
3096 3096 * This function copies the fcp_ioctl structure passed in from user land
3097 3097 * into kernel land. Handles 32 bit applications.
3098 3098 */
3099 3099 /*ARGSUSED*/
static int
fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
    struct fcp_ioctl *fioctl, struct fcp_port **pptr)
{
	struct fcp_port	*t_pptr;

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		/* 32-bit app on a 64-bit kernel: widen the ILP32 layout. */
		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		fioctl->fp_minor = f32_ioctl.fp_minor;
		fioctl->listlen = f32_ioctl.listlen;
		fioctl->list = (caddr_t)(long)f32_ioctl.list;
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin((void *)data, (void *)fioctl,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}

#else /* _MULTI_DATAMODEL */
	if (ddi_copyin((void *)data, (void *)fioctl,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Right now we can assume that the minor number matches with
	 * this instance of fp. If this changes we will need to
	 * revisit this logic.
	 */
	mutex_enter(&fcp_global_mutex);
	/* Walk the global port list for a matching instance number. */
	t_pptr = fcp_port_head;
	while (t_pptr) {
		if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
			break;
		} else {
			t_pptr = t_pptr->port_next;
		}
	}
	*pptr = t_pptr;
	mutex_exit(&fcp_global_mutex);
	/* No port with that instance: nothing to operate on. */
	if (t_pptr == NULL) {
		return (ENXIO);
	}

	return (0);
}
3157 3157
3158 3158 /*
3159 3159 * Function: fcp_port_create_tgt
3160 3160 *
3161 3161 * Description: As the name suggest this function creates the target context
3162 3162 * specified by the the WWN provided by the caller. If the
3163 3163 * creation goes well and the target is known by fp/fctl a PLOGI
3164 3164 * followed by a PRLI are issued.
3165 3165 *
3166 3166 * Argument: pptr fcp port structure
3167 3167 * pwwn WWN of the target
3168 3168 * ret_val Address of the return code. It could be:
3169 3169 * EIO, ENOMEM or 0.
3170 3170 * fc_status PLOGI or PRLI status completion
3171 3171 * fc_pkt_state PLOGI or PRLI state completion
3172 3172 * fc_pkt_reason PLOGI or PRLI reason completion
3173 3173 * fc_pkt_action PLOGI or PRLI action completion
3174 3174 *
3175 3175 * Return Value: NULL if it failed
3176 3176 * Target structure address if it succeeds
3177 3177 */
static struct fcp_tgt *
fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
    int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_tgt	*ptgt = NULL;
	fc_portmap_t	devlist;
	int		lcount;
	int		error;

	*ret_val = 0;

	/*
	 * Check FC port device & get port map
	 */
	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
	    &error, 1) == NULL) {
		*ret_val = EIO;
	} else {
		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
		    &devlist) != FC_SUCCESS) {
			*ret_val = EIO;
		}
	}

	/*
	 * Set port map flags.  Harmless when the lookup above failed:
	 * devlist is only consumed below while *ret_val is still 0.
	 */
	devlist.map_type = PORT_DEVICE_USER_CREATE;

	/* Allocate target */
	if (*ret_val == 0) {
		lcount = pptr->port_link_cnt;
		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			*ret_val = ENOMEM;
		} else {
			/* Setup target: copy its identity from the port map */
			mutex_enter(&ptgt->tgt_mutex);

			ptgt->tgt_statec_cause = FCP_CAUSE_TGT_CHANGE;
			ptgt->tgt_tmp_cnt = 1;
			ptgt->tgt_d_id = devlist.map_did.port_id;
			ptgt->tgt_hard_addr =
			    devlist.map_hard_addr.hard_addr;
			ptgt->tgt_pd_handle = devlist.map_pd;
			ptgt->tgt_fca_dev = NULL;

			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
			    FC_WWN_SIZE);
			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
			    FC_WWN_SIZE);

			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/*
	 * Release global mutex for PLOGI and PRLI.  The caller holds
	 * fcp_global_mutex (re-acquired below); it must be dropped here
	 * because the PLOGI/PRLI helpers block waiting for completion.
	 */
	mutex_exit(&fcp_global_mutex);

	/* Send PLOGI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	/* Send PRLI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	mutex_enter(&fcp_global_mutex);

	return (ptgt);
}
3253 3253
3254 3254 /*
3255 3255 * Function: fcp_tgt_send_plogi
3256 3256 *
3257 3257 * Description: This function sends a PLOGI to the target specified by the
3258 3258 * caller and waits till it completes.
3259 3259 *
3260 3260 * Argument: ptgt Target to send the plogi to.
3261 3261 * fc_status Status returned by fp/fctl in the PLOGI request.
3262 3262 * fc_pkt_state State returned by fp/fctl in the PLOGI request.
3263 3263 * fc_pkt_reason Reason returned by fp/fctl in the PLOGI request.
3264 3264 * fc_pkt_action Action returned by fp/fctl in the PLOGI request.
3265 3265 *
3266 3266 * Return Value: 0
3267 3267 * ENOMEM
3268 3268 * EIO
3269 3269 *
3270 3270 * Context: User context.
3271 3271 */
static int
fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_port	*pptr;
	struct fcp_ipkt	*icmd;
	struct fc_packet	*fpkt;
	fc_frame_hdr_t	*hp;
	struct la_els_logi	logi;
	int	tcount;
	int	lcount;
	int	ret, login_retval = ~FC_SUCCESS;

	ret = 0;

	pptr = ptgt->tgt_port;

	/* Snapshot the link/target change counts for the internal packet. */
	lcount = pptr->port_link_cnt;
	tcount = ptgt->tgt_change_cnt;

	/* Alloc internal packet */
	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
	    sizeof (la_els_logi_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT);

	if (icmd == NULL) {
		ret = ENOMEM;
	} else {
		/*
		 * Setup internal packet as sema sync: the completion
		 * callback will post the embedded semaphore.
		 */
		fcp_ipkt_sema_init(icmd);

		/*
		 * Setup internal packet (icmd)
		 */
		icmd->ipkt_lun = NULL;
		icmd->ipkt_restart = 0;
		icmd->ipkt_retries = 0;
		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Setup fc_packet
		 */
		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

		/*
		 * Setup FC frame header
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id = pptr->port_id;	/* source ID */
		hp->d_id = ptgt->tgt_d_id;	/* dest ID */
		hp->r_ctl = R_CTL_ELS_REQ;
		hp->type = FC_TYPE_EXTENDED_LS;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->rsvd = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;	/* i.e. none */
		hp->rx_id = 0xffff;	/* i.e. none */
		hp->ro = 0;

		/*
		 * Setup PLOGI
		 */
		bzero(&logi, sizeof (struct la_els_logi));
		logi.ls_code.ls_code = LA_ELS_PLOGI;

		/* Copy out through the access handle (device byte order). */
		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		/*
		 * Send PLOGI
		 */
		*fc_status = login_retval =
		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
		if (*fc_status != FC_SUCCESS) {
			ret = EIO;
		}
	}

	/*
	 * Wait for completion.  Only wait if fc_ulp_login() was issued and
	 * returned success -- that is the only case in which the callback
	 * will fire and post the semaphore.
	 */
	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);

		*fc_pkt_state = fpkt->pkt_state;
		*fc_pkt_reason = fpkt->pkt_reason;
		*fc_pkt_action = fpkt->pkt_action;
	}

	/*
	 * Cleanup transport data structures if icmd was alloc-ed.  With
	 * sema sync the completion callback only wakes this thread (see
	 * fcp_ipkt_sema_callback()), so cleanup always happens here,
	 * whether or not fc_ulp_login() succeeded.
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	return (ret);
}
3382 3382
3383 3383 /*
3384 3384 * Function: fcp_tgt_send_prli
3385 3385 *
3386 3386 * Description: Does nothing as of today.
3387 3387 *
3388 3388 * Argument: ptgt Target to send the prli to.
3389 3389 * fc_status Status returned by fp/fctl in the PRLI request.
3390 3390 * fc_pkt_state State returned by fp/fctl in the PRLI request.
3391 3391 * fc_pkt_reason Reason returned by fp/fctl in the PRLI request.
3392 3392 * fc_pkt_action Action returned by fp/fctl in the PRLI request.
3393 3393 *
3394 3394 * Return Value: 0
3395 3395 */
3396 3396 /*ARGSUSED*/
static int
fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	/* Intentionally a no-op today (see block comment above). */
	return (0);
}
3403 3403
3404 3404 /*
3405 3405 * Function: fcp_ipkt_sema_init
3406 3406 *
3407 3407 * Description: Initializes the semaphore contained in the internal packet.
3408 3408 *
3409 3409 * Argument: icmd Internal packet the semaphore of which must be
3410 3410 * initialized.
3411 3411 *
3412 3412 * Return Value: None
3413 3413 *
3414 3414 * Context: User context only.
3415 3415 */
3416 3416 static void
3417 3417 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3418 3418 {
3419 3419 struct fc_packet *fpkt;
3420 3420
3421 3421 fpkt = icmd->ipkt_fpkt;
3422 3422
3423 3423 /* Create semaphore for sync */
3424 3424 sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3425 3425
3426 3426 /* Setup the completion callback */
3427 3427 fpkt->pkt_comp = fcp_ipkt_sema_callback;
3428 3428 }
3429 3429
3430 3430 /*
3431 3431 * Function: fcp_ipkt_sema_wait
3432 3432 *
3433 3433 * Description: Wait on the semaphore embedded in the internal packet. The
3434 3434 * semaphore is released in the callback.
3435 3435 *
3436 3436 * Argument: icmd Internal packet to wait on for completion.
3437 3437 *
3438 3438 * Return Value: 0
3439 3439 * EIO
3440 3440 * EBUSY
3441 3441 * EAGAIN
3442 3442 *
3443 3443 * Context: User context only.
3444 3444 *
3445 3445 * This function does a conversion between the field pkt_state of the fc_packet
3446 3446 * embedded in the internal packet (icmd) and the code it returns.
3447 3447 */
3448 3448 static int
3449 3449 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3450 3450 {
3451 3451 struct fc_packet *fpkt;
3452 3452 int ret;
3453 3453
3454 3454 ret = EIO;
3455 3455 fpkt = icmd->ipkt_fpkt;
3456 3456
3457 3457 /*
3458 3458 * Wait on semaphore
3459 3459 */
3460 3460 sema_p(&(icmd->ipkt_sema));
3461 3461
3462 3462 /*
3463 3463 * Check the status of the FC packet
3464 3464 */
3465 3465 switch (fpkt->pkt_state) {
3466 3466 case FC_PKT_SUCCESS:
3467 3467 ret = 0;
3468 3468 break;
3469 3469 case FC_PKT_LOCAL_RJT:
3470 3470 switch (fpkt->pkt_reason) {
3471 3471 case FC_REASON_SEQ_TIMEOUT:
3472 3472 case FC_REASON_RX_BUF_TIMEOUT:
3473 3473 ret = EAGAIN;
3474 3474 break;
3475 3475 case FC_REASON_PKT_BUSY:
3476 3476 ret = EBUSY;
3477 3477 break;
3478 3478 }
3479 3479 break;
3480 3480 case FC_PKT_TIMEOUT:
3481 3481 ret = EAGAIN;
3482 3482 break;
3483 3483 case FC_PKT_LOCAL_BSY:
3484 3484 case FC_PKT_TRAN_BSY:
3485 3485 case FC_PKT_NPORT_BSY:
3486 3486 case FC_PKT_FABRIC_BSY:
3487 3487 ret = EBUSY;
3488 3488 break;
3489 3489 case FC_PKT_LS_RJT:
3490 3490 case FC_PKT_BA_RJT:
3491 3491 switch (fpkt->pkt_reason) {
3492 3492 case FC_REASON_LOGICAL_BSY:
3493 3493 ret = EBUSY;
3494 3494 break;
3495 3495 }
3496 3496 break;
3497 3497 case FC_PKT_FS_RJT:
3498 3498 switch (fpkt->pkt_reason) {
3499 3499 case FC_REASON_FS_LOGICAL_BUSY:
3500 3500 ret = EBUSY;
3501 3501 break;
3502 3502 }
3503 3503 break;
3504 3504 }
3505 3505
3506 3506 return (ret);
3507 3507 }
3508 3508
3509 3509 /*
3510 3510 * Function: fcp_ipkt_sema_callback
3511 3511 *
3512 3512 * Description: Registered as the completion callback function for the FC
3513 3513 * transport when the ipkt semaphore is used for sync. This will
3514 3514 * cleanup the used data structures, if necessary and wake up
3515 3515 * the user thread to complete the transaction.
3516 3516 *
3517 3517 * Argument: fpkt FC packet (points to the icmd)
3518 3518 *
3519 3519 * Return Value: None
3520 3520 *
3521 3521 * Context: User context only
3522 3522 */
3523 3523 static void
3524 3524 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3525 3525 {
3526 3526 struct fcp_ipkt *icmd;
3527 3527
3528 3528 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3529 3529
3530 3530 /*
3531 3531 * Wake up user thread
3532 3532 */
3533 3533 sema_v(&(icmd->ipkt_sema));
3534 3534 }
3535 3535
3536 3536 /*
3537 3537 * Function: fcp_ipkt_sema_cleanup
3538 3538 *
3539 3539 * Description: Called to cleanup (if necessary) the data structures used
3540 3540 * when ipkt sema is used for sync. This function will detect
3541 3541 * whether the caller is the last thread (via counter) and
3542 3542 * cleanup only if necessary.
3543 3543 *
3544 3544 * Argument: icmd Internal command packet
3545 3545 *
3546 3546 * Return Value: None
3547 3547 *
3548 3548 * Context: User context only
3549 3549 */
3550 3550 static void
3551 3551 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3552 3552 {
3553 3553 struct fcp_tgt *ptgt;
3554 3554 struct fcp_port *pptr;
3555 3555
3556 3556 ptgt = icmd->ipkt_tgt;
3557 3557 pptr = icmd->ipkt_port;
3558 3558
3559 3559 /*
3560 3560 * Acquire data structure
3561 3561 */
3562 3562 mutex_enter(&ptgt->tgt_mutex);
3563 3563
3564 3564 /*
3565 3565 * Destroy semaphore
3566 3566 */
3567 3567 sema_destroy(&(icmd->ipkt_sema));
3568 3568
3569 3569 /*
3570 3570 * Cleanup internal packet
3571 3571 */
3572 3572 mutex_exit(&ptgt->tgt_mutex);
3573 3573 fcp_icmd_free(pptr, icmd);
3574 3574 }
3575 3575
3576 3576 /*
3577 3577 * Function: fcp_port_attach
3578 3578 *
3579 3579 * Description: Called by the transport framework to resume, suspend or
3580 3580 * attach a new port.
3581 3581 *
3582 3582 * Argument: ulph Port handle
3583 3583 * *pinfo Port information
3584 3584 * cmd Command
3585 3585 * s_id Port ID
3586 3586 *
3587 3587 * Return Value: FC_FAILURE or FC_SUCCESS
3588 3588 */
3589 3589 /*ARGSUSED*/
3590 3590 static int
3591 3591 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3592 3592 fc_attach_cmd_t cmd, uint32_t s_id)
3593 3593 {
3594 3594 int instance;
3595 3595 int res = FC_FAILURE; /* default result */
3596 3596
3597 3597 ASSERT(pinfo != NULL);
3598 3598
3599 3599 instance = ddi_get_instance(pinfo->port_dip);
3600 3600
3601 3601 switch (cmd) {
3602 3602 case FC_CMD_ATTACH:
3603 3603 /*
3604 3604 * this port instance attaching for the first time (or after
3605 3605 * being detached before)
3606 3606 */
3607 3607 if (fcp_handle_port_attach(ulph, pinfo, s_id,
3608 3608 instance) == DDI_SUCCESS) {
3609 3609 res = FC_SUCCESS;
3610 3610 } else {
3611 3611 ASSERT(ddi_get_soft_state(fcp_softstate,
3612 3612 instance) == NULL);
3613 3613 }
3614 3614 break;
3615 3615
3616 3616 case FC_CMD_RESUME:
3617 3617 case FC_CMD_POWER_UP:
3618 3618 /*
3619 3619 * this port instance was attached and the suspended and
3620 3620 * will now be resumed
3621 3621 */
3622 3622 if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3623 3623 instance) == DDI_SUCCESS) {
3624 3624 res = FC_SUCCESS;
3625 3625 }
3626 3626 break;
3627 3627
3628 3628 default:
3629 3629 /* shouldn't happen */
3630 3630 FCP_TRACE(fcp_logq, "fcp",
3631 3631 fcp_trace, FCP_BUF_LEVEL_2, 0,
3632 3632 "port_attach: unknown cmdcommand: %d", cmd);
3633 3633 break;
3634 3634 }
3635 3635
3636 3636 /* return result */
3637 3637 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3638 3638 FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3639 3639
3640 3640 return (res);
3641 3641 }
3642 3642
3643 3643
3644 3644 /*
3645 3645 * detach or suspend this port instance
3646 3646 *
3647 3647 * acquires and releases the global mutex
3648 3648 *
3649 3649 * acquires and releases the mutex for this port
3650 3650 *
3651 3651 * acquires and releases the hotplug mutex for this port
3652 3652 */
3653 3653 /*ARGSUSED*/
static int
fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
    fc_detach_cmd_t cmd)
{
	int flag;
	int instance;
	struct fcp_port *pptr;

	instance = ddi_get_instance(info->port_dip);
	/*
	 * NOTE(review): ddi_get_soft_state() can return NULL if no soft
	 * state exists for this instance, and pptr is passed on unchecked
	 * below -- presumably fcp_handle_port_detach() copes; confirm.
	 */
	pptr = ddi_get_soft_state(fcp_softstate, instance);

	/* Translate the transport command into an FCP port state flag. */
	switch (cmd) {
	case FC_CMD_SUSPEND:
		FCP_DTRACE(fcp_logq, "fcp",
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "port suspend called for port %d", instance);
		flag = FCP_STATE_SUSPENDED;
		break;

	case FC_CMD_POWER_DOWN:
		FCP_DTRACE(fcp_logq, "fcp",
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "port power down called for port %d", instance);
		flag = FCP_STATE_POWER_DOWN;
		break;

	case FC_CMD_DETACH:
		FCP_DTRACE(fcp_logq, "fcp",
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "port detach called for port %d", instance);
		flag = FCP_STATE_DETACHING;
		break;

	default:
		/* shouldn't happen */
		return (FC_FAILURE);
	}
	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");

	return (fcp_handle_port_detach(pptr, flag, instance));
}
3696 3696
3697 3697
3698 3698 /*
3699 3699 * called for ioctls on the transport's devctl interface, and the transport
3700 3700 * has passed it to us
3701 3701 *
3702 3702 * this will only be called for device control ioctls (i.e. hotplugging stuff)
3703 3703 *
3704 3704 * return FC_SUCCESS if we decide to claim the ioctl,
3705 3705 * else return FC_UNCLAIMED
3706 3706 *
3707 3707 * *rval is set iff we decide to claim the ioctl
3708 3708 */
3709 3709 /*ARGSUSED*/
3710 3710 static int
3711 3711 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3712 3712     intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3713 3713 {
3714 3714 	int			retval = FC_UNCLAIMED; /* return value */
3715 3715 	struct fcp_port		*pptr = NULL;	/* our soft state */
3716 3716 	struct devctl_iocdata	*dcp = NULL;	/* for devctl */
3717 3717 	dev_info_t		*cdip;
3718 3718 	mdi_pathinfo_t		*pip = NULL;
3719 3719 	char			*ndi_nm;	/* NDI name */
3720 3720 	char			*ndi_addr;	/* NDI addr */
3721 3721 	int			is_mpxio, circ;
          	/* nonzero while we hold the mdi/ndi devi enter of port_dip (released at "out:") */
3722 3722 	int			devi_entered = 0;
          	/* lbolt deadline for the DEVCTL_BUS_DEV_CREATE LUN-ready polling loop */
3723 3723 	clock_t			end_time;
3724 3724 
3725 3725 	ASSERT(rval != NULL);
3726 3726 
3727 3727 	FCP_DTRACE(fcp_logq, "fcp",
3728 3728 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3729 3729 	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3730 3730 
3731 3731 	/* if already claimed then forget it */
3732 3732 	if (claimed) {
3733 3733 		/*
3734 3734 		 * for now, if this ioctl has already been claimed, then
3735 3735 		 * we just ignore it
3736 3736 		 */
3737 3737 		return (retval);
3738 3738 	}
3739 3739 
3740 3740 	/* get our port info */
3741 3741 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
          		/* NOTE(review): message reads "handle handle" -- duplicated word in log text */
3742 3742 		fcp_log(CE_WARN, NULL,
3743 3743 		    "!fcp:Invalid port handle handle in ioctl");
3744 3744 		*rval = ENXIO;
3745 3745 		return (retval);
3746 3746 	}
3747 3747 	is_mpxio = pptr->port_mpxio;
3748 3748 
3749 3749 	switch (cmd) {
3750 3750 	case DEVCTL_BUS_GETSTATE:
3751 3751 	case DEVCTL_BUS_QUIESCE:
3752 3752 	case DEVCTL_BUS_UNQUIESCE:
3753 3753 	case DEVCTL_BUS_RESET:
3754 3754 	case DEVCTL_BUS_RESETALL:
3755 3755 
3756 3756 	case DEVCTL_BUS_DEV_CREATE:
3757 3757 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3758 3758 			return (retval);
3759 3759 		}
3760 3760 		break;
3761 3761 
3762 3762 	case DEVCTL_DEVICE_GETSTATE:
3763 3763 	case DEVCTL_DEVICE_OFFLINE:
3764 3764 	case DEVCTL_DEVICE_ONLINE:
3765 3765 	case DEVCTL_DEVICE_REMOVE:
3766 3766 	case DEVCTL_DEVICE_RESET:
3767 3767 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3768 3768 			return (retval);
3769 3769 		}
3770 3770 
3771 3771 		ASSERT(dcp != NULL);
3772 3772 
3773 3773 		/* ensure we have a name and address */
3774 3774 		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3775 3775 		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3776 3776 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
3777 3777 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
3778 3778 			    "ioctl: can't get name (%s) or addr (%s)",
3779 3779 			    ndi_nm ? ndi_nm : "<null ptr>",
3780 3780 			    ndi_addr ? ndi_addr : "<null ptr>");
3781 3781 			ndi_dc_freehdl(dcp);
3782 3782 			return (retval);
3783 3783 		}
3784 3784 
3785 3785 
3786 3786 		/* get our child's DIP */
3787 3787 		ASSERT(pptr != NULL);
3788 3788 		if (is_mpxio) {
3789 3789 			mdi_devi_enter(pptr->port_dip, &circ);
3790 3790 		} else {
3791 3791 			ndi_devi_enter(pptr->port_dip, &circ);
3792 3792 		}
3793 3793 		devi_entered = 1;
3794 3794 
3795 3795 		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3796 3796 		    ndi_addr)) == NULL) {
3797 3797 			/* Look for virtually enumerated devices. */
3798 3798 			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3799 3799 			if (pip == NULL ||
3800 3800 			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3801 3801 				*rval = ENXIO;
3802 3802 				goto out;
3803 3803 			}
3804 3804 		}
3805 3805 		break;
3806 3806 
3807 3807 	default:
3808 3808 		*rval = ENOTTY;
3809 3809 		return (retval);
3810 3810 	}
3811 3811 
3812 3812 	/* this ioctl is ours -- process it */
3813 3813 
3814 3814 	retval = FC_SUCCESS;		/* just means we claim the ioctl */
3815 3815 
3816 3816 	/* we assume it will be a success; else we'll set error value */
3817 3817 	*rval = 0;
3818 3818 
3819 3819 
3820 3820 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3821 3821 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3822 3822 	    "ioctl: claiming this one");
3823 3823 
3824 3824 	/* handle ioctls now */
3825 3825 	switch (cmd) {
3826 3826 	case DEVCTL_DEVICE_GETSTATE:
3827 3827 		ASSERT(cdip != NULL);
3828 3828 		ASSERT(dcp != NULL);
3829 3829 		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3830 3830 			*rval = EFAULT;
3831 3831 		}
3832 3832 		break;
3833 3833 
3834 3834 	case DEVCTL_DEVICE_REMOVE:
3835 3835 	case DEVCTL_DEVICE_OFFLINE: {
3836 3836 		int			flag = 0;
3837 3837 		int			lcount;
3838 3838 		int			tcount;
3839 3839 		struct fcp_pkt	*head = NULL;
3840 3840 		struct fcp_lun	*plun;
3841 3841 		child_info_t		*cip = CIP(cdip);
3842 3842 		int			all = 1;
3843 3843 		struct fcp_lun	*tplun;
3844 3844 		struct fcp_tgt	*ptgt;
3845 3845 
3846 3846 		ASSERT(pptr != NULL);
3847 3847 		ASSERT(cdip != NULL);
3848 3848 
3849 3849 		mutex_enter(&pptr->port_mutex);
3850 3850 		if (pip != NULL) {
3851 3851 			cip = CIP(pip);
3852 3852 		}
3853 3853 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3854 3854 			mutex_exit(&pptr->port_mutex);
3855 3855 			*rval = ENXIO;
3856 3856 			break;
3857 3857 		}
3858 3858 
3859 3859 		head = fcp_scan_commands(plun);
3860 3860 		if (head != NULL) {
3861 3861 			fcp_abort_commands(head, LUN_PORT);
3862 3862 		}
3863 3863 		lcount = pptr->port_link_cnt;
3864 3864 		tcount = plun->lun_tgt->tgt_change_cnt;
3865 3865 		mutex_exit(&pptr->port_mutex);
3866 3866 
3867 3867 		if (cmd == DEVCTL_DEVICE_REMOVE) {
3868 3868 			flag = NDI_DEVI_REMOVE;
3869 3869 		}
3870 3870 
          		/* Drop the devi enter before blocking in the hotplug thread below. */
3871 3871 		if (is_mpxio) {
3872 3872 			mdi_devi_exit(pptr->port_dip, circ);
3873 3873 		} else {
3874 3874 			ndi_devi_exit(pptr->port_dip, circ);
3875 3875 		}
3876 3876 		devi_entered = 0;
3877 3877 
3878 3878 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3879 3879 		    FCP_OFFLINE, lcount, tcount, flag);
3880 3880 
3881 3881 		if (*rval != NDI_SUCCESS) {
3882 3882 			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3883 3883 			break;
3884 3884 		}
3885 3885 
3886 3886 		fcp_update_offline_flags(plun);
3887 3887 
3888 3888 		ptgt = plun->lun_tgt;
3889 3889 		mutex_enter(&ptgt->tgt_mutex);
3890 3890 		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3891 3891 		    tplun->lun_next) {
3892 3892 			mutex_enter(&tplun->lun_mutex);
3893 3893 			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3894 3894 				all = 0;
3895 3895 			}
3896 3896 			mutex_exit(&tplun->lun_mutex);
3897 3897 		}
3898 3898 
3899 3899 		if (all) {
3900 3900 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3901 3901 			/*
3902 3902 			 * The user is unconfiguring/offlining the device.
3903 3903 			 * If fabric and the auto configuration is set
3904 3904 			 * then make sure the user is the only one who
3905 3905 			 * can reconfigure the device.
3906 3906 			 */
3907 3907 			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3908 3908 			    fcp_enable_auto_configuration) {
3909 3909 				ptgt->tgt_manual_config_only = 1;
3910 3910 			}
3911 3911 		}
3912 3912 		mutex_exit(&ptgt->tgt_mutex);
3913 3913 		break;
3914 3914 	}
3915 3915 
3916 3916 	case DEVCTL_DEVICE_ONLINE: {
3917 3917 		int			lcount;
3918 3918 		int			tcount;
3919 3919 		struct fcp_lun	*plun;
3920 3920 		child_info_t		*cip = CIP(cdip);
3921 3921 
3922 3922 		ASSERT(cdip != NULL);
3923 3923 		ASSERT(pptr != NULL);
3924 3924 
3925 3925 		mutex_enter(&pptr->port_mutex);
3926 3926 		if (pip != NULL) {
3927 3927 			cip = CIP(pip);
3928 3928 		}
3929 3929 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3930 3930 			mutex_exit(&pptr->port_mutex);
3931 3931 			*rval = ENXIO;
3932 3932 			break;
3933 3933 		}
3934 3934 		lcount = pptr->port_link_cnt;
3935 3935 		tcount = plun->lun_tgt->tgt_change_cnt;
3936 3936 		mutex_exit(&pptr->port_mutex);
3937 3937 
3938 3938 		/*
3939 3939 		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3940 3940 		 * to allow the device attach to occur when the device is
3941 3941 		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3942 3942 		 * from the scsi_probe()).
3943 3943 		 */
3944 3944 		mutex_enter(&LUN_TGT->tgt_mutex);
3945 3945 		plun->lun_state |= FCP_LUN_ONLINING;
3946 3946 		mutex_exit(&LUN_TGT->tgt_mutex);
3947 3947 
3948 3948 		if (is_mpxio) {
3949 3949 			mdi_devi_exit(pptr->port_dip, circ);
3950 3950 		} else {
3951 3951 			ndi_devi_exit(pptr->port_dip, circ);
3952 3952 		}
3953 3953 		devi_entered = 0;
3954 3954 
3955 3955 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3956 3956 		    FCP_ONLINE, lcount, tcount, 0);
3957 3957 
3958 3958 		if (*rval != NDI_SUCCESS) {
3959 3959 			/* Reset the FCP_LUN_ONLINING bit */
3960 3960 			mutex_enter(&LUN_TGT->tgt_mutex);
3961 3961 			plun->lun_state &= ~FCP_LUN_ONLINING;
3962 3962 			mutex_exit(&LUN_TGT->tgt_mutex);
3963 3963 			*rval = EIO;
3964 3964 			break;
3965 3965 		}
3966 3966 		mutex_enter(&LUN_TGT->tgt_mutex);
3967 3967 		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3968 3968 		    FCP_LUN_ONLINING);
3969 3969 		mutex_exit(&LUN_TGT->tgt_mutex);
3970 3970 		break;
3971 3971 	}
3972 3972 
3973 3973 	case DEVCTL_BUS_DEV_CREATE: {
3974 3974 		uchar_t			*bytes = NULL;
3975 3975 		uint_t			nbytes;
3976 3976 		struct fcp_tgt		*ptgt = NULL;
3977 3977 		struct fcp_lun		*plun = NULL;
3978 3978 		dev_info_t		*useless_dip = NULL;
3979 3979 
3980 3980 		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3981 3981 		    DEVCTL_CONSTRUCT, &useless_dip);
3982 3982 		if (*rval != 0 || useless_dip == NULL) {
3983 3983 			break;
3984 3984 		}
3985 3985 
3986 3986 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3987 3987 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3988 3988 		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3989 3989 			*rval = EINVAL;
3990 3990 			(void) ndi_devi_free(useless_dip);
3991 3991 			if (bytes != NULL) {
3992 3992 				ddi_prop_free(bytes);
3993 3993 			}
3994 3994 			break;
3995 3995 		}
3996 3996 
3997 3997 		*rval = fcp_create_on_demand(pptr, bytes);
3998 3998 		if (*rval == 0) {
3999 3999 			mutex_enter(&pptr->port_mutex);
4000 4000 			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
4001 4001 			if (ptgt) {
4002 4002 				/*
4003 4003 				 * We now have a pointer to the target that
4004 4004 				 * was created. Lets point to the first LUN on
4005 4005 				 * this new target.
4006 4006 				 */
4007 4007 				mutex_enter(&ptgt->tgt_mutex);
4008 4008 
4009 4009 				plun = ptgt->tgt_lun;
4010 4010 				/*
4011 4011 				 * There may be stale/offline LUN entries on
4012 4012 				 * this list (this is by design) and so we have
4013 4013 				 * to make sure we point to the first online
4014 4014 				 * LUN
4015 4015 				 */
4016 4016 				while (plun &&
4017 4017 				    plun->lun_state & FCP_LUN_OFFLINE) {
4018 4018 					plun = plun->lun_next;
4019 4019 				}
4020 4020 
4021 4021 				mutex_exit(&ptgt->tgt_mutex);
4022 4022 			}
4023 4023 			mutex_exit(&pptr->port_mutex);
4024 4024 		}
4025 4025 
4026 4026 		if (*rval == 0 && ptgt && plun) {
4027 4027 			mutex_enter(&plun->lun_mutex);
4028 4028 			/*
4029 4029 			 * Allow up to fcp_lun_ready_retry seconds to
4030 4030 			 * configure all the luns behind the target.
4031 4031 			 *
4032 4032 			 * The intent here is to allow targets with long
4033 4033 			 * reboot/reset-recovery times to become available
4034 4034 			 * while limiting the maximum wait time for an
4035 4035 			 * unresponsive target.
4036 4036 			 */
4037 4037 			end_time = ddi_get_lbolt() +
4038 4038 			    SEC_TO_TICK(fcp_lun_ready_retry);
4039 4039 
4040 4040 			while (ddi_get_lbolt() < end_time) {
4041 4041 				retval = FC_SUCCESS;
4042 4042 
4043 4043 				/*
4044 4044 				 * The new ndi interfaces for on-demand creation
4045 4045 				 * are inflexible, Do some more work to pass on
4046 4046 				 * a path name of some LUN (design is broken !)
4047 4047 				 */
4048 4048 				if (plun->lun_cip) {
4049 4049 					if (plun->lun_mpxio == 0) {
4050 4050 						cdip = DIP(plun->lun_cip);
4051 4051 					} else {
↓ open down ↓ |
4051 lines elided |
↑ open up ↑ |
4052 4052 						cdip = mdi_pi_get_client(
4053 4053 						    PIP(plun->lun_cip));
4054 4054 					}
4055 4055 					if (cdip == NULL) {
4056 4056 						*rval = ENXIO;
4057 4057 						break;
4058 4058 					}
4059 4059 
          					/*
          					 * Node exists but has not attached yet:
          					 * drop lun_mutex and poll again in one
          					 * second (drv_sectohz(1) == one second
          					 * worth of clock ticks).
          					 */
4060 4060 					if (!i_ddi_devi_attached(cdip)) {
4061 4061 						mutex_exit(&plun->lun_mutex);
4062      -						delay(drv_usectohz(1000000));
     4062 +						delay(drv_sectohz(1));
4063 4063 						mutex_enter(&plun->lun_mutex);
4064 4064 					} else {
4065 4065 						/*
4066 4066 						 * This Lun is ready, lets
4067 4067 						 * check the next one.
4068 4068 						 */
4069 4069 						mutex_exit(&plun->lun_mutex);
4070 4070 						plun = plun->lun_next;
4071 4071 						while (plun && (plun->lun_state
4072 4072 						    & FCP_LUN_OFFLINE)) {
4073 4073 							plun = plun->lun_next;
4074 4074 						}
4075 4075 						if (!plun) {
4076 4076 							break;
4077 4077 						}
4078 4078 						mutex_enter(&plun->lun_mutex);
4079 4079 					}
4080 4080 				} else {
4081 4081 					/*
4082 4082 					 * lun_cip field for a valid lun
4083 4083 					 * should never be NULL. Fail the
4084 4084 					 * command.
4085 4085 					 */
4086 4086 					*rval = ENXIO;
4087 4087 					break;
4088 4088 				}
4089 4089 			}
4090 4090 			if (plun) {
4091 4091 				mutex_exit(&plun->lun_mutex);
4092 4092 			} else {
4093 4093 				char devnm[MAXNAMELEN];
4094 4094 				int nmlen;
4095 4095 
4096 4096 				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4097 4097 				    ddi_node_name(cdip),
4098 4098 				    ddi_get_name_addr(cdip));
4099 4099 
4100 4100 				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4101 4101 				    0) {
4102 4102 					*rval = EFAULT;
4103 4103 				}
4104 4104 			}
4105 4105 		} else {
4106 4106 			int i;
4107 4107 			char buf[25];
4108 4108 
4109 4109 			for (i = 0; i < FC_WWN_SIZE; i++) {
4110 4110 				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4111 4111 			}
4112 4112 
4113 4113 			fcp_log(CE_WARN, pptr->port_dip,
4114 4114 			    "!Failed to create nodes for pwwn=%s; error=%x",
4115 4115 			    buf, *rval);
4116 4116 		}
4117 4117 
4118 4118 		(void) ndi_devi_free(useless_dip);
4119 4119 		ddi_prop_free(bytes);
4120 4120 		break;
4121 4121 	}
4122 4122 
4123 4123 	case DEVCTL_DEVICE_RESET: {
4124 4124 		struct fcp_lun	*plun;
4125 4125 		child_info_t		*cip = CIP(cdip);
4126 4126 
4127 4127 		ASSERT(cdip != NULL);
4128 4128 		ASSERT(pptr != NULL);
4129 4129 		mutex_enter(&pptr->port_mutex);
4130 4130 		if (pip != NULL) {
4131 4131 			cip = CIP(pip);
4132 4132 		}
4133 4133 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4134 4134 			mutex_exit(&pptr->port_mutex);
4135 4135 			*rval = ENXIO;
4136 4136 			break;
4137 4137 		}
4138 4138 		mutex_exit(&pptr->port_mutex);
4139 4139 
4140 4140 		mutex_enter(&plun->lun_tgt->tgt_mutex);
4141 4141 		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4142 4142 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4143 4143 
4144 4144 			*rval = ENXIO;
4145 4145 			break;
4146 4146 		}
4147 4147 
4148 4148 		if (plun->lun_sd == NULL) {
4149 4149 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4150 4150 
4151 4151 			*rval = ENXIO;
4152 4152 			break;
4153 4153 		}
4154 4154 		mutex_exit(&plun->lun_tgt->tgt_mutex);
4155 4155 
4156 4156 		/*
4157 4157 		 * set up ap so that fcp_reset can figure out
4158 4158 		 * which target to reset
4159 4159 		 */
4160 4160 		if (fcp_scsi_reset(&plun->lun_sd->sd_address,
4161 4161 		    RESET_TARGET) == FALSE) {
4162 4162 			*rval = EIO;
4163 4163 		}
4164 4164 		break;
4165 4165 	}
4166 4166 
4167 4167 	case DEVCTL_BUS_GETSTATE:
4168 4168 		ASSERT(dcp != NULL);
4169 4169 		ASSERT(pptr != NULL);
4170 4170 		ASSERT(pptr->port_dip != NULL);
4171 4171 		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4172 4172 		    NDI_SUCCESS) {
4173 4173 			*rval = EFAULT;
4174 4174 		}
4175 4175 		break;
4176 4176 
4177 4177 	case DEVCTL_BUS_QUIESCE:
4178 4178 	case DEVCTL_BUS_UNQUIESCE:
4179 4179 		*rval = ENOTSUP;
4180 4180 		break;
4181 4181 
4182 4182 	case DEVCTL_BUS_RESET:
4183 4183 	case DEVCTL_BUS_RESETALL:
4184 4184 		ASSERT(pptr != NULL);
4185 4185 		(void) fcp_linkreset(pptr, NULL, KM_SLEEP);
4186 4186 		break;
4187 4187 
4188 4188 	default:
4189 4189 		ASSERT(dcp != NULL);
4190 4190 		*rval = ENOTTY;
4191 4191 		break;
4192 4192 	}
4193 4193 
4194 4194 	/* all done -- clean up and return */
4195 4195 out:	if (devi_entered) {
4196 4196 		if (is_mpxio) {
4197 4197 			mdi_devi_exit(pptr->port_dip, circ);
4198 4198 		} else {
4199 4199 			ndi_devi_exit(pptr->port_dip, circ);
4200 4200 		}
4201 4201 	}
4202 4202 
4203 4203 	if (dcp != NULL) {
4204 4204 		ndi_dc_freehdl(dcp);
4205 4205 	}
4206 4206 
4207 4207 	return (retval);
4208 4208 }
4209 4209
4210 4210
4211 4211 /*ARGSUSED*/
4212 4212 static int
4213 4213 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4214 4214 uint32_t claimed)
4215 4215 {
4216 4216 uchar_t r_ctl;
4217 4217 uchar_t ls_code;
4218 4218 struct fcp_port *pptr;
4219 4219
4220 4220 if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4221 4221 return (FC_UNCLAIMED);
4222 4222 }
4223 4223
4224 4224 mutex_enter(&pptr->port_mutex);
4225 4225 if (pptr->port_state & (FCP_STATE_DETACHING |
4226 4226 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4227 4227 mutex_exit(&pptr->port_mutex);
4228 4228 return (FC_UNCLAIMED);
4229 4229 }
4230 4230 mutex_exit(&pptr->port_mutex);
4231 4231
4232 4232 r_ctl = buf->ub_frame.r_ctl;
4233 4233
4234 4234 switch (r_ctl & R_CTL_ROUTING) {
4235 4235 case R_CTL_EXTENDED_SVC:
4236 4236 if (r_ctl == R_CTL_ELS_REQ) {
4237 4237 ls_code = buf->ub_buffer[0];
4238 4238
4239 4239 switch (ls_code) {
4240 4240 case LA_ELS_PRLI:
4241 4241 /*
4242 4242 * We really don't care if something fails.
4243 4243 * If the PRLI was not sent out, then the
4244 4244 * other end will time it out.
4245 4245 */
4246 4246 if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4247 4247 return (FC_SUCCESS);
4248 4248 }
4249 4249 return (FC_UNCLAIMED);
4250 4250 /* NOTREACHED */
4251 4251
4252 4252 default:
4253 4253 break;
4254 4254 }
4255 4255 }
4256 4256 /* FALLTHROUGH */
4257 4257
4258 4258 default:
4259 4259 return (FC_UNCLAIMED);
4260 4260 }
4261 4261 }
4262 4262
4263 4263
4264 4264 /*ARGSUSED*/
4265 4265 static int
4266 4266 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4267 4267 uint32_t claimed)
4268 4268 {
4269 4269 return (FC_UNCLAIMED);
4270 4270 }
4271 4271
4272 4272 /*
4273 4273 * Function: fcp_statec_callback
4274 4274 *
4275 4275 * Description: The purpose of this function is to handle a port state change.
4276 4276 * It is called from fp/fctl and, in a few instances, internally.
4277 4277 *
4278 4278 * Argument: ulph fp/fctl port handle
4279 4279 * port_handle fcp_port structure
4280 4280 * port_state Physical state of the port
4281 4281 * port_top Topology
4282 4282 * *devlist Pointer to the first entry of a table
4283 4283 * containing the remote ports that can be
4284 4284 * reached.
4285 4285 * dev_cnt Number of entries pointed by devlist.
4286 4286 * port_sid Port ID of the local port.
4287 4287 *
4288 4288 * Return Value: None
4289 4289 */
4290 4290 /*ARGSUSED*/
4291 4291 static void
4292 4292 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4293 4293     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4294 4294     uint32_t dev_cnt, uint32_t port_sid)
4295 4295 {
4296 4296 	uint32_t		link_count;
4297 4297 	int			map_len = 0;
4298 4298 	struct fcp_port	*pptr;
4299 4299 	fcp_map_tag_t		*map_tag = NULL;
4300 4300 
4301 4301 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
4302 4302 		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4303 4303 		return;			/* nothing to work with! */
4304 4304 	}
4305 4305 
4306 4306 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4307 4307 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
4308 4308 	    "fcp_statec_callback: port state/dev_cnt/top ="
4309 4309 	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4310 4310 	    dev_cnt, port_top);
4311 4311 
4312 4312 	mutex_enter(&pptr->port_mutex);
4313 4313 
4314 4314 	/*
4315 4315 	 * If a thread is in detach, don't do anything.
4316 4316 	 */
4317 4317 	if (pptr->port_state & (FCP_STATE_DETACHING |
4318 4318 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4319 4319 		mutex_exit(&pptr->port_mutex);
4320 4320 		return;
4321 4321 	}
4322 4322 
4323 4323 	/*
4324 4324 	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4325 4325 	 * init_pkt is called, it knows whether or not the target's status
4326 4326 	 * (or pd) might be changing.
4327 4327 	 */
4328 4328 
4329 4329 	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4330 4330 		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4331 4331 	}
4332 4332 
4333 4333 	/*
4334 4334 	 * the transport doesn't allocate or probe unless being
4335 4335 	 * asked to by either the applications or ULPs
4336 4336 	 *
4337 4337 	 * in cases where the port is OFFLINE at the time of port
4338 4338 	 * attach callback and the link comes ONLINE later, for
4339 4339 	 * easier automatic node creation (i.e. without you having to
4340 4340 	 * go out and run the utility to perform LOGINs) the
4341 4341 	 * following conditional is helpful
4342 4342 	 */
4343 4343 	pptr->port_phys_state = port_state;
4344 4344 
4345 4345 	if (dev_cnt) {
4346 4346 		mutex_exit(&pptr->port_mutex);
4347 4347 
4348 4348 		map_len = sizeof (*map_tag) * dev_cnt;
4349 4349 		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4350 4350 		if (map_tag == NULL) {
4351 4351 			fcp_log(CE_WARN, pptr->port_dip,
4352 4352 			    "!fcp%d: failed to allocate for map tags; "
4353 4353 			    " state change will not be processed",
4354 4354 			    pptr->port_instance);
4355 4355 
4356 4356 			mutex_enter(&pptr->port_mutex);
4357 4357 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4358 4358 			mutex_exit(&pptr->port_mutex);
4359 4359 
4360 4360 			return;
4361 4361 		}
4362 4362 
4363 4363 		mutex_enter(&pptr->port_mutex);
4364 4364 	}
4365 4365 
4366 4366 	if (pptr->port_id != port_sid) {
4367 4367 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4368 4368 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4369 4369 		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4370 4370 		    port_sid);
4371 4371 		/*
4372 4372 		 * The local port changed ID. It is the first time a port ID
4373 4373 		 * is assigned or something drastic happened.  We might have
4374 4374 		 * been unplugged and replugged on another loop or fabric port
4375 4375 		 * or somebody grabbed the AL_PA we had or somebody rezoned
4376 4376 		 * the fabric we were plugged into.
4377 4377 		 */
4378 4378 		pptr->port_id = port_sid;
4379 4379 	}
4380 4380 
          	/* NOTE: every case below is responsible for dropping port_mutex itself. */
4381 4381 	switch (FC_PORT_STATE_MASK(port_state)) {
4382 4382 	case FC_STATE_OFFLINE:
4383 4383 	case FC_STATE_RESET_REQUESTED:
4384 4384 		/*
4385 4385 		 * link has gone from online to offline -- just update the
4386 4386 		 * state of this port to BUSY and MARKed to go offline
4387 4387 		 */
4388 4388 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4389 4389 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4390 4390 		    "link went offline");
4391 4391 		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4392 4392 			/*
4393 4393 			 * We were offline a while ago and this one
4394 4394 			 * seems to indicate that the loop has gone
4395 4395 			 * dead forever.
4396 4396 			 */
4397 4397 			pptr->port_tmp_cnt += dev_cnt;
4398 4398 			pptr->port_state &= ~FCP_STATE_OFFLINE;
4399 4399 			pptr->port_state |= FCP_STATE_INIT;
4400 4400 			link_count = pptr->port_link_cnt;
4401 4401 			fcp_handle_devices(pptr, devlist, dev_cnt,
4402 4402 			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4403 4403 		} else {
4404 4404 			pptr->port_link_cnt++;
4405 4405 			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4406 4406 			fcp_update_state(pptr, (FCP_LUN_BUSY |
4407 4407 			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4408 4408 			if (pptr->port_mpxio) {
4409 4409 				fcp_update_mpxio_path_verifybusy(pptr);
4410 4410 			}
4411 4411 			pptr->port_state |= FCP_STATE_OFFLINE;
4412 4412 			pptr->port_state &=
4413 4413 			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4414 4414 			pptr->port_tmp_cnt = 0;
4415 4415 		}
4416 4416 		mutex_exit(&pptr->port_mutex);
4417 4417 		break;
4418 4418 
4419 4419 	case FC_STATE_ONLINE:
4420 4420 	case FC_STATE_LIP:
4421 4421 	case FC_STATE_LIP_LBIT_SET:
4422 4422 		/*
↓ open down ↓ |
350 lines elided |
↑ open up ↑ |
4423 4423 		 * link has gone from offline to online
4424 4424 		 */
4425 4425 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4426 4426 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4427 4427 		    "link went online");
4428 4428 
4429 4429 		pptr->port_link_cnt++;
4430 4430 
          		/*
          		 * Drain in-flight internal packets, polling once per second
          		 * (drv_sectohz(1) == one second of ticks) with port_mutex
          		 * dropped so their completions can make progress.
          		 */
4431 4431 		while (pptr->port_ipkt_cnt) {
4432 4432 			mutex_exit(&pptr->port_mutex);
4433      -			delay(drv_usectohz(1000000));
     4433 +			delay(drv_sectohz(1));
4434 4434 			mutex_enter(&pptr->port_mutex);
4435 4435 		}
4436 4436 
4437 4437 		pptr->port_topology = port_top;
4438 4438 
4439 4439 		/*
4440 4440 		 * The state of the targets and luns accessible through this
4441 4441 		 * port is updated.
4442 4442 		 */
4443 4443 		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4444 4444 		    FCP_CAUSE_LINK_CHANGE);
4445 4445 
4446 4446 		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4447 4447 		pptr->port_state |= FCP_STATE_ONLINING;
4448 4448 		pptr->port_tmp_cnt = dev_cnt;
4449 4449 		link_count = pptr->port_link_cnt;
4450 4450 
4451 4451 		pptr->port_deadline = fcp_watchdog_time +
4452 4452 		    FCP_ICMD_DEADLINE;
4453 4453 
4454 4454 		if (!dev_cnt) {
4455 4455 			/*
4456 4456 			 * We go directly to the online state if no remote
4457 4457 			 * ports were discovered.
4458 4458 			 */
4459 4459 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4460 4460 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4461 4461 			    "No remote ports discovered");
4462 4462 
4463 4463 			pptr->port_state &= ~FCP_STATE_ONLINING;
4464 4464 			pptr->port_state |= FCP_STATE_ONLINE;
4465 4465 		}
4466 4466 
4467 4467 		switch (port_top) {
4468 4468 		case FC_TOP_FABRIC:
4469 4469 		case FC_TOP_PUBLIC_LOOP:
4470 4470 		case FC_TOP_PRIVATE_LOOP:
4471 4471 		case FC_TOP_PT_PT:
4472 4472 
4473 4473 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4474 4474 				fcp_retry_ns_registry(pptr, port_sid);
4475 4475 			}
4476 4476 
4477 4477 			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4478 4478 			    map_tag, FCP_CAUSE_LINK_CHANGE);
4479 4479 			break;
4480 4480 
4481 4481 		default:
4482 4482 			/*
4483 4483 			 * We got here because we were provided with an unknown
4484 4484 			 * topology.
4485 4485 			 */
4486 4486 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4487 4487 				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4488 4488 			}
4489 4489 
4490 4490 			pptr->port_tmp_cnt -= dev_cnt;
4491 4491 			fcp_log(CE_WARN, pptr->port_dip,
4492 4492 			    "!unknown/unsupported topology (0x%x)", port_top);
4493 4493 			break;
4494 4494 		}
4495 4495 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4496 4496 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4497 4497 		    "Notify ssd of the reset to reinstate the reservations");
4498 4498 
4499 4499 		scsi_hba_reset_notify_callback(&pptr->port_mutex,
4500 4500 		    &pptr->port_reset_notify_listf);
4501 4501 
4502 4502 		mutex_exit(&pptr->port_mutex);
4503 4503 
4504 4504 		break;
4505 4505 
4506 4506 	case FC_STATE_RESET:
4507 4507 		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4508 4508 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4509 4509 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4510 4510 		    "RESET state, waiting for Offline/Online state_cb");
4511 4511 		mutex_exit(&pptr->port_mutex);
4512 4512 		break;
4513 4513 
4514 4514 	case FC_STATE_DEVICE_CHANGE:
4515 4515 		/*
4516 4516 		 * We come here when an application has requested
4517 4517 		 * Dynamic node creation/deletion in Fabric connectivity.
4518 4518 		 */
4519 4519 		if (pptr->port_state & (FCP_STATE_OFFLINE |
4520 4520 		    FCP_STATE_INIT)) {
4521 4521 			/*
4522 4522 			 * This case can happen when the FCTL is in the
4523 4523 			 * process of giving us on online and the host on
4524 4524 			 * the other side issues a PLOGI/PLOGO. Ideally
4525 4525 			 * the state changes should be serialized unless
4526 4526 			 * they are opposite (online-offline).
4527 4527 			 * The transport will give us a final state change
4528 4528 			 * so we can ignore this for the time being.
4529 4529 			 */
4530 4530 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4531 4531 			mutex_exit(&pptr->port_mutex);
4532 4532 			break;
4533 4533 		}
4534 4534 
4535 4535 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4536 4536 			fcp_retry_ns_registry(pptr, port_sid);
4537 4537 		}
4538 4538 
4539 4539 		/*
4540 4540 		 * Extend the deadline under steady state conditions
4541 4541 		 * to provide more time for the device-change-commands
4542 4542 		 */
4543 4543 		if (!pptr->port_ipkt_cnt) {
4544 4544 			pptr->port_deadline = fcp_watchdog_time +
4545 4545 			    FCP_ICMD_DEADLINE;
4546 4546 		}
4547 4547 
4548 4548 		/*
4549 4549 		 * There is another race condition here, where if we were
4550 4550 		 * in ONLINEING state and a devices in the map logs out,
4551 4551 		 * fp will give another state change as DEVICE_CHANGE
4552 4552 		 * and OLD. This will result in that target being offlined.
4553 4553 		 * The pd_handle is freed. If from the first statec callback
4554 4554 		 * we were going to fire a PLOGI/PRLI, the system will
4555 4555 		 * panic in fc_ulp_transport with invalid pd_handle.
4556 4556 		 * The fix is to check for the link_cnt before issuing
4557 4557 		 * any command down.
4558 4558 		 */
4559 4559 		fcp_update_targets(pptr, devlist, dev_cnt,
4560 4560 		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4561 4561 
4562 4562 		link_count = pptr->port_link_cnt;
4563 4563 
4564 4564 		fcp_handle_devices(pptr, devlist, dev_cnt,
4565 4565 		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4566 4566 
4567 4567 		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4568 4568 
4569 4569 		mutex_exit(&pptr->port_mutex);
4570 4570 		break;
4571 4571 
4572 4572 	case FC_STATE_TARGET_PORT_RESET:
4573 4573 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4574 4574 			fcp_retry_ns_registry(pptr, port_sid);
4575 4575 		}
4576 4576 
4577 4577 		/* Do nothing else */
4578 4578 		mutex_exit(&pptr->port_mutex);
4579 4579 		break;
4580 4580 
4581 4581 	default:
4582 4582 		fcp_log(CE_WARN, pptr->port_dip,
4583 4583 		    "!Invalid state change=0x%x", port_state);
4584 4584 		mutex_exit(&pptr->port_mutex);
4585 4585 		break;
4586 4586 	}
4587 4587 
4588 4588 	if (map_tag) {
4589 4589 		kmem_free(map_tag, map_len);
4590 4590 	}
4591 4591 }
4592 4592
4593 4593 /*
4594 4594 * Function: fcp_handle_devices
4595 4595 *
4596 4596 * Description: This function updates the devices currently known by
4597 4597 * walking the list provided by the caller. The list passed
4598 4598 * by the caller is supposed to be the list of reachable
4599 4599 * devices.
4600 4600 *
4601 4601 * Argument: *pptr Fcp port structure.
4602 4602 * *devlist Pointer to the first entry of a table
4603 4603 * containing the remote ports that can be
4604 4604 * reached.
4605 4605 * dev_cnt Number of entries pointed by devlist.
4606 4606 * link_cnt Link state count.
4607 4607 * *map_tag Array of fcp_map_tag_t structures.
4608 4608 * cause What caused this function to be called.
4609 4609 *
4610 4610 * Return Value: None
4611 4611 *
4612 4612 * Notes: The pptr->port_mutex must be held.
4613 4613 */
4614 4614 static void
4615 4615 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4616 4616 uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4617 4617 {
4618 4618 int i;
4619 4619 int check_finish_init = 0;
4620 4620 fc_portmap_t *map_entry;
4621 4621 struct fcp_tgt *ptgt = NULL;
4622 4622
4623 4623 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4624 4624 fcp_trace, FCP_BUF_LEVEL_3, 0,
4625 4625 "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4626 4626
4627 4627 if (dev_cnt) {
4628 4628 ASSERT(map_tag != NULL);
4629 4629 }
4630 4630
4631 4631 /*
4632 4632 * The following code goes through the list of remote ports that are
4633 4633 * accessible through this (pptr) local port (The list walked is the
4634 4634 * one provided by the caller which is the list of the remote ports
4635 4635 * currently reachable). It checks if any of them was already
4636 4636 * known by looking for the corresponding target structure based on
4637 4637 * the world wide name. If a target is part of the list it is tagged
4638 4638 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
4639 4639 *
4640 4640 * Old comment
4641 4641 * -----------
4642 4642 * Before we drop port mutex; we MUST get the tags updated; This
4643 4643 * two step process is somewhat slow, but more reliable.
4644 4644 */
4645 4645 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4646 4646 map_entry = &(devlist[i]);
4647 4647
4648 4648 /*
4649 4649 * get ptr to this map entry in our port's
4650 4650 * list (if any)
4651 4651 */
4652 4652 ptgt = fcp_lookup_target(pptr,
4653 4653 (uchar_t *)&(map_entry->map_pwwn));
4654 4654
4655 4655 if (ptgt) {
4656 4656 map_tag[i] = ptgt->tgt_change_cnt;
4657 4657 if (cause == FCP_CAUSE_LINK_CHANGE) {
4658 4658 ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4659 4659 }
4660 4660 }
4661 4661 }
4662 4662
4663 4663 /*
4664 4664 * At this point we know which devices of the new list were already
4665 4665 * known (The field tgt_aux_state of the target structure has been
4666 4666 * set to FCP_TGT_TAGGED).
4667 4667 *
4668 4668 * The following code goes through the list of targets currently known
4669 4669 * by the local port (the list is actually a hashing table). If a
4670 4670 * target is found and is not tagged, it means the target cannot
4671 4671 * be reached anymore through the local port (pptr). It is offlined.
4672 4672 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4673 4673 */
4674 4674 for (i = 0; i < FCP_NUM_HASH; i++) {
4675 4675 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4676 4676 ptgt = ptgt->tgt_next) {
4677 4677 mutex_enter(&ptgt->tgt_mutex);
4678 4678 if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4679 4679 (cause == FCP_CAUSE_LINK_CHANGE) &&
4680 4680 !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4681 4681 fcp_offline_target_now(pptr, ptgt,
4682 4682 link_cnt, ptgt->tgt_change_cnt, 0);
4683 4683 }
4684 4684 mutex_exit(&ptgt->tgt_mutex);
4685 4685 }
4686 4686 }
4687 4687
4688 4688 /*
4689 4689 * At this point, the devices that were known but cannot be reached
4690 4690 * anymore, have most likely been offlined.
4691 4691 *
4692 4692 * The following section of code seems to go through the list of
4693 4693 * remote ports that can now be reached. For every single one it
4694 4694 * checks if it is already known or if it is a new port.
4695 4695 */
4696 4696 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4697 4697
4698 4698 if (check_finish_init) {
4699 4699 ASSERT(i > 0);
4700 4700 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4701 4701 map_tag[i - 1], cause);
4702 4702 check_finish_init = 0;
4703 4703 }
4704 4704
4705 4705 /* get a pointer to this map entry */
4706 4706 map_entry = &(devlist[i]);
4707 4707
4708 4708 /*
4709 4709 * Check for the duplicate map entry flag. If we have marked
4710 4710 * this entry as a duplicate we skip it since the correct
4711 4711 * (perhaps even same) state change will be encountered
4712 4712 * later in the list.
4713 4713 */
4714 4714 if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
4715 4715 continue;
4716 4716 }
4717 4717
4718 4718 /* get ptr to this map entry in our port's list (if any) */
4719 4719 ptgt = fcp_lookup_target(pptr,
4720 4720 (uchar_t *)&(map_entry->map_pwwn));
4721 4721
4722 4722 if (ptgt) {
4723 4723 /*
4724 4724 * This device was already known. The field
4725 4725 * tgt_aux_state is reset (was probably set to
4726 4726 * FCP_TGT_TAGGED previously in this routine).
4727 4727 */
4728 4728 ptgt->tgt_aux_state = 0;
4729 4729 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4730 4730 fcp_trace, FCP_BUF_LEVEL_3, 0,
4731 4731 "handle_devices: map did/state/type/flags = "
4732 4732 "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4733 4733 "tgt_state=%d",
4734 4734 map_entry->map_did.port_id, map_entry->map_state,
4735 4735 map_entry->map_type, map_entry->map_flags,
4736 4736 ptgt->tgt_d_id, ptgt->tgt_state);
4737 4737 }
4738 4738
4739 4739 if (map_entry->map_type == PORT_DEVICE_OLD ||
4740 4740 map_entry->map_type == PORT_DEVICE_NEW ||
4741 4741 map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
4742 4742 map_entry->map_type == PORT_DEVICE_CHANGED) {
4743 4743 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4744 4744 fcp_trace, FCP_BUF_LEVEL_2, 0,
4745 4745 "map_type=%x, did = %x",
4746 4746 map_entry->map_type,
4747 4747 map_entry->map_did.port_id);
4748 4748 }
4749 4749
4750 4750 switch (map_entry->map_type) {
4751 4751 case PORT_DEVICE_NOCHANGE:
4752 4752 case PORT_DEVICE_USER_CREATE:
4753 4753 case PORT_DEVICE_USER_LOGIN:
4754 4754 case PORT_DEVICE_NEW:
4755 4755 case PORT_DEVICE_REPORTLUN_CHANGED:
4756 4756 FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4757 4757
4758 4758 if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4759 4759 link_cnt, (ptgt) ? map_tag[i] : 0,
4760 4760 cause) == TRUE) {
4761 4761
4762 4762 FCP_TGT_TRACE(ptgt, map_tag[i],
4763 4763 FCP_TGT_TRACE_2);
4764 4764 check_finish_init++;
4765 4765 }
4766 4766 break;
4767 4767
4768 4768 case PORT_DEVICE_OLD:
4769 4769 if (ptgt != NULL) {
4770 4770 FCP_TGT_TRACE(ptgt, map_tag[i],
4771 4771 FCP_TGT_TRACE_3);
4772 4772
4773 4773 mutex_enter(&ptgt->tgt_mutex);
4774 4774 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4775 4775 /*
4776 4776 * Must do an in-line wait for I/Os
↓ open down ↓ |
333 lines elided |
↑ open up ↑ |
4777 4777 * to get drained
4778 4778 */
4779 4779 mutex_exit(&ptgt->tgt_mutex);
4780 4780 mutex_exit(&pptr->port_mutex);
4781 4781
4782 4782 mutex_enter(&ptgt->tgt_mutex);
4783 4783 while (ptgt->tgt_ipkt_cnt ||
4784 4784 fcp_outstanding_lun_cmds(ptgt)
4785 4785 == FC_SUCCESS) {
4786 4786 mutex_exit(&ptgt->tgt_mutex);
4787 - delay(drv_usectohz(1000000));
4787 + delay(drv_sectohz(1));
4788 4788 mutex_enter(&ptgt->tgt_mutex);
4789 4789 }
4790 4790 mutex_exit(&ptgt->tgt_mutex);
4791 4791
4792 4792 mutex_enter(&pptr->port_mutex);
4793 4793 mutex_enter(&ptgt->tgt_mutex);
4794 4794
4795 4795 (void) fcp_offline_target(pptr, ptgt,
4796 4796 link_cnt, map_tag[i], 0, 0);
4797 4797 }
4798 4798 mutex_exit(&ptgt->tgt_mutex);
4799 4799 }
4800 4800 check_finish_init++;
4801 4801 break;
4802 4802
4803 4803 case PORT_DEVICE_USER_DELETE:
4804 4804 case PORT_DEVICE_USER_LOGOUT:
4805 4805 if (ptgt != NULL) {
4806 4806 FCP_TGT_TRACE(ptgt, map_tag[i],
4807 4807 FCP_TGT_TRACE_4);
4808 4808
4809 4809 mutex_enter(&ptgt->tgt_mutex);
4810 4810 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4811 4811 (void) fcp_offline_target(pptr, ptgt,
4812 4812 link_cnt, map_tag[i], 1, 0);
4813 4813 }
4814 4814 mutex_exit(&ptgt->tgt_mutex);
4815 4815 }
4816 4816 check_finish_init++;
4817 4817 break;
4818 4818
4819 4819 case PORT_DEVICE_CHANGED:
4820 4820 if (ptgt != NULL) {
4821 4821 FCP_TGT_TRACE(ptgt, map_tag[i],
4822 4822 FCP_TGT_TRACE_5);
4823 4823
4824 4824 if (fcp_device_changed(pptr, ptgt,
4825 4825 map_entry, link_cnt, map_tag[i],
4826 4826 cause) == TRUE) {
4827 4827 check_finish_init++;
4828 4828 }
4829 4829 } else {
4830 4830 if (fcp_handle_mapflags(pptr, ptgt,
4831 4831 map_entry, link_cnt, 0, cause) == TRUE) {
4832 4832 check_finish_init++;
4833 4833 }
4834 4834 }
4835 4835 break;
4836 4836
4837 4837 default:
4838 4838 fcp_log(CE_WARN, pptr->port_dip,
4839 4839 "!Invalid map_type=0x%x", map_entry->map_type);
4840 4840 check_finish_init++;
4841 4841 break;
4842 4842 }
4843 4843 }
4844 4844
4845 4845 if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4846 4846 ASSERT(i > 0);
4847 4847 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4848 4848 map_tag[i-1], cause);
4849 4849 } else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4850 4850 fcp_offline_all(pptr, link_cnt, cause);
4851 4851 }
4852 4852 }
4853 4853
/*
 * Function:	fcp_handle_reportlun_changed
 *
 * Description:	Handles a REPORT_LUN_CHANGED condition on a target by
 *		re-issuing a SCSI REPORT LUN command to LUN 0.  If no LUN 0
 *		structure exists yet for the target, one is allocated first.
 *		The LUN inventory is rebuilt by the REPORT LUN completion
 *		callback, not here.
 *
 * Argument:	*ptgt	Target whose LUN inventory may have changed.
 *		cause	What caused this function to be called.
 *
 * Return Value: TRUE	Failed (REPORT LUN was not sent).
 *		 FALSE	Succeeded (REPORT LUN is in flight).
 *
 * Notes:	Acquires and releases ptgt->tgt_mutex; the caller must not
 *		hold it.  The caller (fcp_handle_mapflags) drops
 *		pptr->port_mutex before calling in here.
 */
static int
fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
{
	struct fcp_lun *plun;
	struct fcp_port *pptr;
	int rscn_count;
	int lun0_newalloc;
	int ret = TRUE;		/* note: TRUE means failure here */

	ASSERT(ptgt);
	pptr = ptgt->tgt_port;
	lun0_newalloc = 0;
	if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
		/*
		 * no LUN struct for LUN 0 yet exists,
		 * so create one
		 */
		plun = fcp_alloc_lun(ptgt);
		if (plun == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to allocate lun 0 for"
			    " D_ID=%x", ptgt->tgt_d_id);
			return (ret);
		}
		lun0_newalloc = 1;
	}

	mutex_enter(&ptgt->tgt_mutex);
	/*
	 * consider lun 0 as device not connected if it is
	 * offlined or newly allocated
	 */
	if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
		plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
	}
	/* Mark LUN 0 busy while the REPORT LUN command is outstanding. */
	plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
	plun->lun_state &= ~FCP_LUN_OFFLINE;
	/* Restart LUN accounting; the REPORT LUN response rebuilds it. */
	ptgt->tgt_lun_cnt = 1;
	ptgt->tgt_report_lun_cnt = 0;
	mutex_exit(&ptgt->tgt_mutex);

	rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
	    sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
	    ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
		    "to D_ID=%x", ptgt->tgt_d_id);
	} else {
		/* command is in flight; completion handled in callback */
		ret = FALSE;
	}

	return (ret);
}
4908 4908
/*
 * Function:	fcp_handle_mapflags
 *
 * Description:	This function creates a target structure if the ptgt passed
 *		is NULL. It also kicks off the PLOGI if we are not logged
 *		into the target yet or the PRLI if we are logged into the
 *		target already. The rest of the treatment is done in the
 *		callbacks of the PLOGI or PRLI.
 *
 * Argument:	*pptr		FCP Port structure.
 *		*ptgt		Target structure.
 *		*map_entry	Array of fc_portmap_t structures.
 *		link_cnt	Link state count.
 *		tgt_cnt		Target state count.
 *		cause		What caused this function to be called.
 *
 * Return Value: TRUE	Failed
 *		 FALSE	Succeeded
 *
 * Notes:	pptr->port_mutex must be owned.  It is dropped and
 *		reacquired at several points below (allocation, the
 *		REPORTLUN_CHANGED path, and around the ELS send); the
 *		mutex is always held again on return.
 */
static int
fcp_handle_mapflags(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	int lcount;
	int tcount;
	int ret = TRUE;
	int alloc;
	struct fcp_ipkt *icmd;
	struct fcp_lun *pseq_lun = NULL;
	uchar_t opcode;
	int valid_ptgt_was_passed = FALSE;

	ASSERT(mutex_owned(&pptr->port_mutex));

	/*
	 * This case is possible where the FCTL has come up and done discovery
	 * before FCP was loaded and attached. FCTL would have discovered the
	 * devices and later the ULP came online. In this case ULP's would get
	 * PORT_DEVICE_NOCHANGE but target would be NULL.
	 */
	if (ptgt == NULL) {
		/* don't already have a target */
		/* port_mutex is released across the allocation call */
		mutex_exit(&pptr->port_mutex);
		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
		mutex_enter(&pptr->port_mutex);

		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			return (ret);
		}
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_statec_cause = cause;
		ptgt->tgt_tmp_cnt = 1;
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		valid_ptgt_was_passed = TRUE;
	}

	/*
	 * Copy in the target parameters
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_d_id = map_entry->map_did.port_id;
	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
	ptgt->tgt_pd_handle = map_entry->map_pd;
	ptgt->tgt_fca_dev = NULL;

	/* Copy port and node WWNs */
	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
	    FC_WWN_SIZE);
	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
	    FC_WWN_SIZE);

	/*
	 * For a known, logged-in, unchanged target: if it has an online
	 * tape (sequential) LUN, clear the MARK bits and skip rediscovery
	 * entirely (return TRUE = target intentionally skipped).
	 */
	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
	    valid_ptgt_was_passed) {
		/*
		 * determine if there are any tape LUNs on this target
		 */
		for (pseq_lun = ptgt->tgt_lun;
		    pseq_lun != NULL;
		    pseq_lun = pseq_lun->lun_next) {
			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_MARK);
				mutex_exit(&ptgt->tgt_mutex);
				return (ret);
			}
		}
	}

	/*
	 * if UA'REPORT_LUN_CHANGED received,
	 * send out REPORT LUN promptly, skip PLOGI/PRLI process
	 */
	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		ret = fcp_handle_reportlun_changed(ptgt, cause);

		mutex_enter(&pptr->port_mutex);
		return (ret);
	}

	/*
	 * If ptgt was NULL when this function was entered, then tgt_node_state
	 * was never specifically initialized but zeroed out which means
	 * FCP_TGT_NODE_NONE.
	 */
	switch (ptgt->tgt_node_state) {
	case FCP_TGT_NODE_NONE:
	case FCP_TGT_NODE_ON_DEMAND:
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration &&
		    (ptgt->tgt_manual_config_only == 1) &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			/*
			 * If auto configuration is set and
			 * the tgt_manual_config_only flag is set then
			 * we only want the user to be able to change
			 * the state through create_on_demand.
			 */
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
		break;

	case FCP_TGT_NODE_PRESENT:
		break;
	}
	/*
	 * If we are booting from a fabric device, make sure we
	 * mark the node state appropriately for this target to be
	 * enumerated
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
		if (bcmp((caddr_t)pptr->port_boot_wwn,
		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
		    sizeof (ptgt->tgt_port_wwn)) == 0) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
	}
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
	    map_entry->map_rscn_info.ulp_rscn_count);

	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * Reset target OFFLINE state and mark the target BUSY
	 */
	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);

	/* tgt_cnt of 0 means "use the target's current change count" */
	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
	lcount = link_cnt;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * if we are already logged in, then we do a PRLI, else
	 * we do a PLOGI first (to get logged in)
	 *
	 * We will not check if we are the PLOGI initiator
	 */
	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;

	/* one buffer big enough for either ELS payload */
	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));

	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
	    cause, map_entry->map_rscn_info.ulp_rscn_count);

	if (icmd == NULL) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
		/*
		 * We've exited port_mutex before calling fcp_icmd_alloc,
		 * we need to make sure we reacquire it before returning.
		 */
		mutex_enter(&pptr->port_mutex);
		return (FALSE);
	}

	/* TRUE is only returned while target is intended skipped */
	ret = FALSE;
	/* discover info about this target */
	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
	    lcount, tcount, cause)) == DDI_SUCCESS) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
	} else {
		/* send failed: release the internal packet we allocated */
		fcp_icmd_free(pptr, icmd);
		ret = TRUE;
	}
	mutex_enter(&pptr->port_mutex);

	return (ret);
}
5124 5124
/*
 * Function:	fcp_send_els
 *
 * Description:	Sends an ELS to the target specified by the caller. Supports
 *		PLOGI and PRLI.
 *
 * Argument:	*pptr		Fcp port.
 *		*ptgt		Target to send the ELS to.
 *		*icmd		Internal packet (may be NULL, in which case
 *				one is allocated here and freed on failure).
 *		opcode		ELS opcode (LA_ELS_PLOGI or LA_ELS_PRLI).
 *		lcount		Link state change counter
 *		tcount		Target state change counter
 *		cause		What caused the call
 *
 * Return Value: DDI_SUCCESS	ELS handed to the transport; completion is
 *				handled in the PLOGI/PRLI callback.
 *		 Others		Failure; a caller-supplied icmd is NOT freed
 *				here (only an internally allocated one is).
 */
static int
fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
{
	fc_packet_t *fpkt;
	fc_frame_hdr_t *hp;
	int internal = 0;	/* nonzero if icmd was allocated here */
	int alloc;
	int cmd_len;
	int resp_len;
	int res = DDI_FAILURE; /* default result */
	int rval = DDI_FAILURE;

	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
	ASSERT(ptgt->tgt_port == pptr);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");

	if (opcode == LA_ELS_PLOGI) {
		cmd_len = sizeof (la_els_logi_t);
		resp_len = sizeof (la_els_logi_t);
	} else {
		ASSERT(opcode == LA_ELS_PRLI);
		cmd_len = sizeof (la_els_prli_t);
		resp_len = sizeof (la_els_prli_t);
	}

	if (icmd == NULL) {
		/* allocate a packet big enough for either ELS */
		alloc = FCP_MAX(sizeof (la_els_logi_t),
		    sizeof (la_els_prli_t));
		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
		    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
		if (icmd == NULL) {
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
			return (res);
		}
		internal++;
	}
	fpkt = icmd->ipkt_fpkt;

	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = 0;
	icmd->ipkt_retries = 0;

	/* fill in fpkt info */
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

	/* get ptr to frame hdr in fpkt */
	hp = &fpkt->pkt_cmd_fhdr;

	/*
	 * fill in frame hdr
	 */
	hp->r_ctl = R_CTL_ELS_REQ;
	hp->s_id = pptr->port_id;	/* source ID */
	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
	hp->type = FC_TYPE_EXTENDED_LS;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->seq_id = 0;
	hp->rsvd = 0;
	hp->df_ctl = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;	/* i.e. none */
	hp->rx_id = 0xffff;	/* i.e. none */
	hp->ro = 0;

	/*
	 * at this point we have a filled in cmd pkt
	 *
	 * fill in the respective info, then use the transport to send
	 * the packet
	 *
	 * for a PLOGI call fc_ulp_login(), and
	 * for a PRLI call fc_ulp_issue_els()
	 */
	switch (opcode) {
	case LA_ELS_PLOGI: {
		struct la_els_logi logi;

		bzero(&logi, sizeof (struct la_els_logi));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;
		logi.ls_code.ls_code = LA_ELS_PLOGI;
		logi.ls_code.mbz = 0;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Only send if no link/target state change occurred since
		 * the packet was built; the check must be made under
		 * port_mutex, but the mutex is dropped before the
		 * (potentially blocking) transport call.
		 */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PLOGI");
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "fcp_send_els1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
		}
		break;
	}

	case LA_ELS_PRLI: {
		struct la_els_prli prli;
		struct fcp_prli *fprli;

		bzero(&prli, sizeof (struct la_els_prli));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;

		/* fill in PRLI cmd ELS fields */
		prli.ls_code = LA_ELS_PRLI;
		prli.page_length = 0x10;	/* service param page length */
		prli.payload_length = sizeof (struct la_els_prli);

		icmd->ipkt_opcode = LA_ELS_PRLI;

		/* get ptr to PRLI service params */
		fprli = (struct fcp_prli *)prli.service_params;

		/* fill in service params */
		fprli->type = 0x08;	/* FCP (SCSI) type code */
		fprli->resvd1 = 0;
		fprli->orig_process_assoc_valid = 0;
		fprli->resp_process_assoc_valid = 0;
		fprli->establish_image_pair = 1;
		fprli->resvd2 = 0;
		fprli->resvd3 = 0;
		fprli->obsolete_1 = 0;
		fprli->obsolete_2 = 0;
		fprli->data_overlay_allowed = 0;
		fprli->initiator_fn = 1;
		fprli->confirmed_compl_allowed = 1;

		/* advertise target function only if the ltct ULP is loaded */
		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
			fprli->target_fn = 1;
		} else {
			fprli->target_fn = 0;
		}

		fprli->retry = 1;
		fprli->read_xfer_rdy_disabled = 1;
		fprli->write_xfer_rdy_disabled = 0;

		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

		/* issue the PRLI request */

		/* same state-change guard as the PLOGI case above */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PRLI");
		} else {
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
		}
		break;
	}

	default:
		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
		break;
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: returning %d", res);

	/* only free the icmd if this routine allocated it */
	if (res != DDI_SUCCESS) {
		if (internal) {
			fcp_icmd_free(pptr, icmd);
		}
	}

	return (res);
}
5353 5353
5354 5354
5355 5355 /*
5356 5356 * called internally update the state of all of the tgts and each LUN
5357 5357 * for this port (i.e. each target known to be attached to this port)
5358 5358 * if they are not already offline
5359 5359 *
5360 5360 * must be called with the port mutex owned
5361 5361 *
5362 5362 * acquires and releases the target mutexes for each target attached
5363 5363 * to this port
5364 5364 */
5365 5365 void
5366 5366 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5367 5367 {
5368 5368 int i;
5369 5369 struct fcp_tgt *ptgt;
5370 5370
5371 5371 ASSERT(mutex_owned(&pptr->port_mutex));
5372 5372
5373 5373 for (i = 0; i < FCP_NUM_HASH; i++) {
5374 5374 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5375 5375 ptgt = ptgt->tgt_next) {
5376 5376 mutex_enter(&ptgt->tgt_mutex);
5377 5377 fcp_update_tgt_state(ptgt, FCP_SET, state);
5378 5378 ptgt->tgt_change_cnt++;
5379 5379 ptgt->tgt_statec_cause = cause;
5380 5380 ptgt->tgt_tmp_cnt = 1;
5381 5381 ptgt->tgt_done = 0;
5382 5382 mutex_exit(&ptgt->tgt_mutex);
5383 5383 }
5384 5384 }
5385 5385 }
5386 5386
5387 5387
5388 5388 static void
5389 5389 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5390 5390 {
5391 5391 int i;
5392 5392 int ndevs;
5393 5393 struct fcp_tgt *ptgt;
5394 5394
5395 5395 ASSERT(mutex_owned(&pptr->port_mutex));
5396 5396
5397 5397 for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5398 5398 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5399 5399 ptgt = ptgt->tgt_next) {
5400 5400 ndevs++;
5401 5401 }
5402 5402 }
5403 5403
5404 5404 if (ndevs == 0) {
5405 5405 return;
5406 5406 }
5407 5407 pptr->port_tmp_cnt = ndevs;
5408 5408
5409 5409 for (i = 0; i < FCP_NUM_HASH; i++) {
5410 5410 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5411 5411 ptgt = ptgt->tgt_next) {
5412 5412 (void) fcp_call_finish_init_held(pptr, ptgt,
5413 5413 lcount, ptgt->tgt_change_cnt, cause);
5414 5414 }
5415 5415 }
5416 5416 }
5417 5417
5418 5418 /*
5419 5419 * Function: fcp_update_tgt_state
5420 5420 *
5421 5421 * Description: This function updates the field tgt_state of a target. That
5422 5422 * field is a bitmap and which bit can be set or reset
5423 5423 * individually. The action applied to the target state is also
5424 5424 * applied to all the LUNs belonging to the target (provided the
5425 5425 * LUN is not offline). A side effect of applying the state
5426 5426 * modification to the target and the LUNs is the field tgt_trace
5427 5427 * of the target and lun_trace of the LUNs is set to zero.
5428 5428 *
5429 5429 *
5430 5430 * Argument: *ptgt Target structure.
5431 5431 * flag Flag indication what action to apply (set/reset).
5432 5432 * state State bits to update.
5433 5433 *
5434 5434 * Return Value: None
5435 5435 *
5436 5436 * Context: Interrupt, Kernel or User context.
5437 5437 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5438 5438 * calling this function.
5439 5439 */
5440 5440 void
5441 5441 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5442 5442 {
5443 5443 struct fcp_lun *plun;
5444 5444
5445 5445 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5446 5446
5447 5447 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5448 5448 /* The target is not offline. */
5449 5449 if (flag == FCP_SET) {
5450 5450 ptgt->tgt_state |= state;
5451 5451 ptgt->tgt_trace = 0;
5452 5452 } else {
5453 5453 ptgt->tgt_state &= ~state;
5454 5454 }
5455 5455
5456 5456 for (plun = ptgt->tgt_lun; plun != NULL;
5457 5457 plun = plun->lun_next) {
5458 5458 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5459 5459 /* The LUN is not offline. */
5460 5460 if (flag == FCP_SET) {
5461 5461 plun->lun_state |= state;
5462 5462 plun->lun_trace = 0;
5463 5463 } else {
5464 5464 plun->lun_state &= ~state;
5465 5465 }
5466 5466 }
5467 5467 }
5468 5468 }
5469 5469 }
5470 5470
/*
 * Function:	fcp_update_lun_state
 *
 * Description: This function updates the field lun_state of a LUN. That
 *		field is a bitmap and which bit can be set or reset
 *		individually.
 *
 * Argument:	*plun	LUN structure.
 *		flag	Flag indication what action to apply (set/reset).
 *		state	State bits to update.
 *
 * Return Value: None
 *
 * Context:	Interrupt, Kernel or User context.
 *		The mutex of the target (ptgt->tgt_mutex) must be owned when
 *		calling this function.
 */
void
fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
{
	struct fcp_tgt *ptgt = plun->lun_tgt;

	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	/*
	 * NOTE(review): lun_state is tested against FCP_TGT_OFFLINE here
	 * rather than FCP_LUN_OFFLINE as one would expect.  This is only
	 * harmless if the two flags share the same bit value -- confirm
	 * against the flag definitions in fcpvar.h before changing.
	 */
	if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
		if (flag == FCP_SET) {
			plun->lun_state |= state;
		} else {
			plun->lun_state &= ~state;
		}
	}
}
5503 5503
5504 5504 /*
5505 5505 * Function: fcp_get_port
5506 5506 *
5507 5507 * Description: This function returns the fcp_port structure from the opaque
5508 5508 * handle passed by the caller. That opaque handle is the handle
5509 5509 * used by fp/fctl to identify a particular local port. That
5510 5510 * handle has been stored in the corresponding fcp_port
5511 5511 * structure. This function is going to walk the global list of
5512 5512 * fcp_port structures till one has a port_fp_handle that matches
5513 5513 * the handle passed by the caller. This function enters the
5514 5514 * mutex fcp_global_mutex while walking the global list and then
5515 5515 * releases it.
5516 5516 *
5517 5517 * Argument: port_handle Opaque handle that fp/fctl uses to identify a
5518 5518 * particular port.
5519 5519 *
5520 5520 * Return Value: NULL Not found.
5521 5521 * Not NULL Pointer to the fcp_port structure.
5522 5522 *
5523 5523 * Context: Interrupt, Kernel or User context.
5524 5524 */
5525 5525 static struct fcp_port *
5526 5526 fcp_get_port(opaque_t port_handle)
5527 5527 {
5528 5528 struct fcp_port *pptr;
5529 5529
5530 5530 ASSERT(port_handle != NULL);
5531 5531
5532 5532 mutex_enter(&fcp_global_mutex);
5533 5533 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5534 5534 if (pptr->port_fp_handle == port_handle) {
5535 5535 break;
5536 5536 }
5537 5537 }
5538 5538 mutex_exit(&fcp_global_mutex);
5539 5539
5540 5540 return (pptr);
5541 5541 }
5542 5542
5543 5543
5544 5544 static void
5545 5545 fcp_unsol_callback(fc_packet_t *fpkt)
5546 5546 {
5547 5547 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5548 5548 struct fcp_port *pptr = icmd->ipkt_port;
5549 5549
5550 5550 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5551 5551 caddr_t state, reason, action, expln;
5552 5552
5553 5553 (void) fc_ulp_pkt_error(fpkt, &state, &reason,
5554 5554 &action, &expln);
5555 5555
5556 5556 fcp_log(CE_WARN, pptr->port_dip,
5557 5557 "!couldn't post response to unsolicited request: "
5558 5558 " state=%s reason=%s rx_id=%x ox_id=%x",
5559 5559 state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5560 5560 fpkt->pkt_cmd_fhdr.rx_id);
5561 5561 }
5562 5562 fcp_icmd_free(pptr, icmd);
5563 5563 }
5564 5564
5565 5565
5566 5566 /*
5567 5567 * Perform general purpose preparation of a response to an unsolicited request
5568 5568 */
5569 5569 static void
5570 5570 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5571 5571 uchar_t r_ctl, uchar_t type)
5572 5572 {
5573 5573 pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5574 5574 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5575 5575 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5576 5576 pkt->pkt_cmd_fhdr.type = type;
5577 5577 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5578 5578 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5579 5579 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl;
5580 5580 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5581 5581 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5582 5582 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5583 5583 pkt->pkt_cmd_fhdr.ro = 0;
5584 5584 pkt->pkt_cmd_fhdr.rsvd = 0;
5585 5585 pkt->pkt_comp = fcp_unsol_callback;
5586 5586 pkt->pkt_pd = NULL;
5587 5587 pkt->pkt_ub_resp_token = (opaque_t)buf;
5588 5588 }
5589 5589
5590 5590
/*
 * Function: fcp_unsol_prli
 *
 * Description: Handles an unsolicited PRLI ELS request by building a PRLI
 *		accept (LA_ELS_ACC) and sending it back to the originator.
 *		The unsolicited buffer is released only after the response
 *		has been successfully handed to the transport.
 *
 * Argument:	*pptr		FCP port that received the request.
 *		*buf		Unsolicited buffer holding the PRLI payload.
 *
 * Return Value: FC_SUCCESS	Response sent (or queued for later retry).
 *		 FC_FAILURE	Allocation or transmission failed.
 */
/*ARGSUSED*/
static int
fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
{
	fc_packet_t		*fpkt;
	struct la_els_prli	prli;
	struct fcp_prli		*fprli;
	struct fcp_ipkt		*icmd;
	struct la_els_prli	*from;
	struct fcp_prli		*orig;
	struct fcp_tgt		*ptgt;
	int			tcount = 0;
	int			lcount;

	from = (struct la_els_prli *)buf->ub_buffer;
	orig = (struct fcp_prli *)from->service_params;
	/*
	 * Snapshot the target change counter (if the sender is a known
	 * target) so a later state change can invalidate this response.
	 */
	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
	    NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		tcount = ptgt->tgt_change_cnt;
		mutex_exit(&ptgt->tgt_mutex);
	}

	/* likewise snapshot the link state counter */
	mutex_enter(&pptr->port_mutex);
	lcount = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	/*
	 * Allocate an internal packet for the accept.  Its response buffer
	 * is (ab)used below to save a copy of the unsolicited PRLI payload.
	 */
	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
	    sizeof (la_els_prli_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
		return (FC_FAILURE);
	}

	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
	fpkt->pkt_rsplen = 0;
	fpkt->pkt_datalen = 0;

	icmd->ipkt_opcode = LA_ELS_PRLI;

	/* build the PRLI accept payload */
	bzero(&prli, sizeof (struct la_els_prli));
	fprli = (struct fcp_prli *)prli.service_params;
	prli.ls_code = LA_ELS_ACC;
	prli.page_length = 0x10;
	prli.payload_length = sizeof (struct la_els_prli);

	/* fill in service params */
	fprli->type = 0x08;
	fprli->resvd1 = 0;
	/* echo the originator's process associator information */
	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
	fprli->orig_process_associator = orig->orig_process_associator;
	fprli->resp_process_assoc_valid = 0;
	fprli->establish_image_pair = 1;
	fprli->resvd2 = 0;
	fprli->resvd3 = 0;
	fprli->obsolete_1 = 0;
	fprli->obsolete_2 = 0;
	fprli->data_overlay_allowed = 0;
	fprli->initiator_fn = 1;
	fprli->confirmed_compl_allowed = 1;

	/* advertise target function only if the target-mode ULP is loaded */
	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
		fprli->target_fn = 1;
	} else {
		fprli->target_fn = 0;
	}

	fprli->retry = 1;
	fprli->read_xfer_rdy_disabled = 1;
	fprli->write_xfer_rdy_disabled = 0;

	/* save the unsol prli payload first */
	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));

	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);

	/* only send if the link has not changed since we snapshotted it */
	mutex_enter(&pptr->port_mutex);
	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
		int rval;
		mutex_exit(&pptr->port_mutex);

		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
		    FC_SUCCESS) {
			/* transient transport states: queue for retry */
			if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
			    ptgt != NULL) {
				fcp_queue_ipkt(pptr, fpkt);
				return (FC_SUCCESS);
			}
			/* Let it timeout */
			fcp_icmd_free(pptr, icmd);
			return (FC_FAILURE);
		}
	} else {
		mutex_exit(&pptr->port_mutex);
		fcp_icmd_free(pptr, icmd);
		return (FC_FAILURE);
	}

	/* the response is on the wire; release the unsolicited buffer */
	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);

	return (FC_SUCCESS);
}
5701 5701
/*
 * Function: fcp_icmd_alloc
 *
 * Description: This function allocates a fcp_ipkt structure. The pkt_comp
 *		field is initialized to fcp_icmd_callback.  Sometimes it is
 *		modified by the caller (such as fcp_send_scsi).  The
 *		structure is also tied to the state of the line and of the
 *		target at a particular time.  That link is established by
 *		setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
 *		and tcount which came respectively from pptr->link_cnt and
 *		ptgt->tgt_change_cnt.
 *
 * Argument:	*pptr		Fcp port.
 *		*ptgt		Target (destination of the command).
 *		cmd_len		Length of the command.
 *		resp_len	Length of the expected response.
 *		data_len	Length of the data.
 *		nodma		Indicates whether the command and response
 *				will be transferred through DMA or not.
 *		lcount		Link state change counter.
 *		tcount		Target state change counter.
 *		cause		Reason that lead to this call.
 *		rscn_count	RSCN count to pass down to the transport, or
 *				FC_INVALID_RSCN_COUNT if not applicable.
 *
 * Return Value: NULL		Failed.
 *		 Not NULL	Internal packet address.
 */
static struct fcp_ipkt *
fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
    int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
    uint32_t rscn_count)
{
	int		dma_setup = 0;
	fc_packet_t	*fpkt;
	struct fcp_ipkt	*icmd = NULL;

	/*
	 * One allocation covers the ipkt, the DMA cookie array and the FCA
	 * private area, laid out back to back in that order.
	 */
	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
	    KM_NOSLEEP);
	if (icmd == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!internal packet allocation failed");
		return (NULL);
	}

	/*
	 * initialize the allocated packet
	 */
	icmd->ipkt_nodma = nodma;
	icmd->ipkt_next = icmd->ipkt_prev = NULL;
	icmd->ipkt_lun = NULL;

	/* tie the packet to the link/target state at allocation time */
	icmd->ipkt_link_cnt = lcount;
	icmd->ipkt_change_cnt = tcount;
	icmd->ipkt_cause = cause;

	mutex_enter(&pptr->port_mutex);
	icmd->ipkt_port = pptr;
	mutex_exit(&pptr->port_mutex);

	/* keep track of amt of data to be sent in pkt */
	icmd->ipkt_cmdlen = cmd_len;
	icmd->ipkt_resplen = resp_len;
	icmd->ipkt_datalen = data_len;

	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);

	/* set pkt's private ptr to point to cmd pkt */
	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;

	/* set FCA private ptr to memory just beyond */
	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
	    ((char *)icmd + sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz);

	/* get ptr to fpkt substruct and fill it in */
	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
	    sizeof (struct fcp_ipkt));

	if (ptgt != NULL) {
		icmd->ipkt_tgt = ptgt;
		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
	}

	fpkt->pkt_comp = fcp_icmd_callback;
	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = data_len;

	/*
	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
	 * rscn_count as fcp knows down to the transport. If a valid count was
	 * passed into this function, we allocate memory to actually pass down
	 * this info.
	 *
	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
	 * basically mean that fcp will not be able to help transport
	 * distinguish if a new RSCN has come after fcp was last informed about
	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
	 * 5068068 where the device might end up going offline in case of RSCN
	 * storms.
	 */
	fpkt->pkt_ulp_rscn_infop = NULL;
	if (rscn_count != FC_INVALID_RSCN_COUNT) {
		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
		if (fpkt->pkt_ulp_rscn_infop == NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "Failed to alloc memory to pass rscn info");
		}
	}

	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		fc_ulp_rscn_info_t *rscnp;

		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
		rscnp->ulp_rscn_count = rscn_count;
	}

	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
		goto fail;
	}
	dma_setup++;

	/*
	 * Must hold target mutex across setting of pkt_pd and call to
	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
	 * away while we're not looking.
	 */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		fpkt->pkt_pd = ptgt->tgt_pd_handle;

		/* ask transport to do its initialization on this pkt */
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			mutex_exit(&ptgt->tgt_mutex);
			goto fail;
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			goto fail;
		}
	}

	/*
	 * Bail out if the port started detaching, suspending or powering
	 * down while we were busy setting the packet up.
	 */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		int rval;

		mutex_exit(&pptr->port_mutex);

		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
		ASSERT(rval == FC_SUCCESS);

		goto fail;
	}

	/* account for the new outstanding internal packet */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_ipkt_cnt++;
		mutex_exit(&ptgt->tgt_mutex);
	}

	pptr->port_ipkt_cnt++;

	mutex_exit(&pptr->port_mutex);

	return (icmd);

fail:
	/* undo, in reverse order, whatever was set up above */
	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		kmem_free(fpkt->pkt_ulp_rscn_infop,
		    sizeof (fc_ulp_rscn_info_t));
		fpkt->pkt_ulp_rscn_infop = NULL;
	}

	if (dma_setup) {
		fcp_free_dma(pptr, icmd);
	}
	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
	    (size_t)pptr->port_dmacookie_sz);

	return (NULL);
}
5898 5898
/*
 * Function: fcp_icmd_free
 *
 * Description: Frees the internal command passed by the caller.  This is
 *		the inverse of fcp_icmd_alloc(): the transport is given a
 *		chance to clean up, the RSCN info and DMA resources are
 *		released, the combined allocation is freed, and the
 *		outstanding internal packet counters are decremented.
 *
 * Argument:	*pptr		Fcp port.
 *		*icmd		Internal packet to free.
 *
 * Return Value: None
 */
static void
fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
{
	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;

	/* Let the underlying layers do their cleanup. */
	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
	    icmd->ipkt_fpkt);

	/* release the RSCN info, if any, attached by fcp_icmd_alloc() */
	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
		    sizeof (fc_ulp_rscn_info_t));
	}

	fcp_free_dma(pptr, icmd);

	/* ipkt, cookie array and FCA private area were a single allocation */
	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
	    (size_t)pptr->port_dmacookie_sz);

	mutex_enter(&pptr->port_mutex);

	if (ptgt) {
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_ipkt_cnt--;
		mutex_exit(&ptgt->tgt_mutex);
	}

	pptr->port_ipkt_cnt--;
	mutex_exit(&pptr->port_mutex);
}
5939 5939
/*
 * Function: fcp_alloc_dma
 *
 * Description: Allocates the DMA resources required for the internal
 *		packet.  The command/response buffers are either plain
 *		kernel memory (nodma) or DMA buffers from the port pool,
 *		and the data buffer (if any) gets its own DMA handle,
 *		memory and cookie list unless the FCA cannot do DMA at
 *		all (FCP_STATE_FCA_IS_NODMA).
 *
 * Argument:	*pptr		FCP port.
 *		*icmd		Internal FCP packet.
 *		nodma		Indicates if the Cmd and Resp will be DMAed.
 *		flags		Allocation flags (Sleep or NoSleep).
 *
 * Return Value: FC_SUCCESS
 *		 FC_NOMEM
 */
static int
fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
    int nodma, int flags)
{
	int		rval;
	size_t		real_size;
	uint_t		ccount;
	int		bound = 0;
	int		cmd_resp = 0;
	fc_packet_t	*fpkt;
	ddi_dma_cookie_t	pkt_data_cookie;
	ddi_dma_cookie_t	*cp;
	uint32_t	cnt;

	fpkt = &icmd->ipkt_fc_packet;

	/* nothing may have been set up yet on this packet */
	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
	    fpkt->pkt_resp_dma == NULL);

	icmd->ipkt_nodma = nodma;

	if (nodma) {
		/* cmd/resp as plain kernel memory, no DMA */
		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
		if (fpkt->pkt_cmd == NULL) {
			goto fail;
		}

		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
		if (fpkt->pkt_resp == NULL) {
			goto fail;
		}
	} else {
		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);

		/* cmd/resp out of the port's preallocated DMA pool */
		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
		if (rval == FC_FAILURE) {
			ASSERT(fpkt->pkt_cmd_dma == NULL &&
			    fpkt->pkt_resp_dma == NULL);
			goto fail;
		}
		cmd_resp++;
	}

	if ((fpkt->pkt_datalen != 0) &&
	    !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
		/*
		 * set up DMA handle and memory for the data in this packet
		 */
		if (ddi_dma_alloc_handle(pptr->port_dip,
		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
			goto fail;
		}

		/* was DMA mem size gotten < size asked for/needed ?? */
		if (real_size < fpkt->pkt_datalen) {
			goto fail;
		}

		/* bind DMA address and handle together */
		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
			goto fail;
		}
		bound++;

		/* more cookies than the FCA's scatter/gather list allows? */
		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
			goto fail;
		}

		fpkt->pkt_data_cookie_cnt = ccount;

		/*
		 * Copy the cookie list into the array that lives just
		 * after the ipkt (set up by fcp_icmd_alloc()).
		 */
		cp = fpkt->pkt_data_cookie;
		*cp = pkt_data_cookie;
		cp++;

		for (cnt = 1; cnt < ccount; cnt++, cp++) {
			ddi_dma_nextcookie(fpkt->pkt_data_dma,
			    &pkt_data_cookie);
			*cp = pkt_data_cookie;
		}

	} else if (fpkt->pkt_datalen != 0) {
		/*
		 * If it's a pseudo FCA, then it can't support DMA even in
		 * SCSI data phase.
		 */
		fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
		if (fpkt->pkt_data == NULL) {
			goto fail;
		}

	}

	return (FC_SUCCESS);

fail:
	/* unwind whatever subset of the resources was acquired */
	if (bound) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
	}

	if (fpkt->pkt_data_dma) {
		if (fpkt->pkt_data) {
			ddi_dma_mem_free(&fpkt->pkt_data_acc);
		}
		ddi_dma_free_handle(&fpkt->pkt_data_dma);
	} else {
		if (fpkt->pkt_data) {
			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
		}
	}

	if (nodma) {
		if (fpkt->pkt_cmd) {
			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
		}
		if (fpkt->pkt_resp) {
			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
		}
	} else {
		if (cmd_resp) {
			fcp_free_cmd_resp(pptr, fpkt);
		}
	}

	return (FC_NOMEM);
}
6090 6090
6091 6091
6092 6092 static void
6093 6093 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
6094 6094 {
6095 6095 fc_packet_t *fpkt = icmd->ipkt_fpkt;
6096 6096
6097 6097 if (fpkt->pkt_data_dma) {
6098 6098 (void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6099 6099 if (fpkt->pkt_data) {
6100 6100 ddi_dma_mem_free(&fpkt->pkt_data_acc);
6101 6101 }
6102 6102 ddi_dma_free_handle(&fpkt->pkt_data_dma);
6103 6103 } else {
6104 6104 if (fpkt->pkt_data) {
6105 6105 kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6106 6106 }
6107 6107 /*
6108 6108 * Need we reset pkt_* to zero???
6109 6109 */
6110 6110 }
6111 6111
6112 6112 if (icmd->ipkt_nodma) {
6113 6113 if (fpkt->pkt_cmd) {
6114 6114 kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
6115 6115 }
6116 6116 if (fpkt->pkt_resp) {
6117 6117 kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
6118 6118 }
6119 6119 } else {
6120 6120 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
6121 6121
6122 6122 fcp_free_cmd_resp(pptr, fpkt);
6123 6123 }
6124 6124 }
6125 6125
6126 6126 /*
6127 6127 * Function: fcp_lookup_target
6128 6128 *
6129 6129 * Description: Finds a target given a WWN.
6130 6130 *
6131 6131 * Argument: *pptr FCP port.
6132 6132 * *wwn World Wide Name of the device to look for.
6133 6133 *
6134 6134 * Return Value: NULL No target found
6135 6135 * Not NULL Target structure
6136 6136 *
6137 6137 * Context: Interrupt context.
6138 6138 * The mutex pptr->port_mutex must be owned.
6139 6139 */
6140 6140 /* ARGSUSED */
6141 6141 static struct fcp_tgt *
6142 6142 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6143 6143 {
6144 6144 int hash;
6145 6145 struct fcp_tgt *ptgt;
6146 6146
6147 6147 ASSERT(mutex_owned(&pptr->port_mutex));
6148 6148
6149 6149 hash = FCP_HASH(wwn);
6150 6150
6151 6151 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6152 6152 ptgt = ptgt->tgt_next) {
6153 6153 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6154 6154 bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6155 6155 sizeof (ptgt->tgt_port_wwn)) == 0) {
6156 6156 break;
6157 6157 }
6158 6158 }
6159 6159
6160 6160 return (ptgt);
6161 6161 }
6162 6162
6163 6163
6164 6164 /*
6165 6165 * Find target structure given a port identifier
6166 6166 */
6167 6167 static struct fcp_tgt *
6168 6168 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6169 6169 {
6170 6170 fc_portid_t port_id;
6171 6171 la_wwn_t pwwn;
6172 6172 struct fcp_tgt *ptgt = NULL;
6173 6173
6174 6174 port_id.priv_lilp_posit = 0;
6175 6175 port_id.port_id = d_id;
6176 6176 if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6177 6177 &pwwn) == FC_SUCCESS) {
6178 6178 mutex_enter(&pptr->port_mutex);
6179 6179 ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6180 6180 mutex_exit(&pptr->port_mutex);
6181 6181 }
6182 6182
6183 6183 return (ptgt);
6184 6184 }
6185 6185
6186 6186
/*
 * the packet completion callback routine for info cmd pkts
 *
 * this means fpkt pts to a response to either a PLOGI or a PRLI
 *
 * if there is an error an attempt is made to call a routine to resend
 * the command that failed
 *
 * On a successful PLOGI the login sequence continues with a PRLI (reusing
 * the same icmd); on a successful PRLI the target is brought online and a
 * REPORT LUN is issued for LUN 0.  Unless the packet was requeued for
 * retry or handed off to fcp_send_scsi(), the icmd is freed on the way
 * out (free_pkt).
 */
static void
fcp_icmd_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct la_els_prli	*prli;
	struct la_els_prli	prli_s;
	struct fcp_prli		*fprli;
	struct fcp_lun	*plun;
	int		free_pkt = 1;
	int		rval;
	ls_code_t	resp;
	uchar_t		prli_acc = 0;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
	int		lun0_newalloc;

	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;

	/* get ptrs to the port and target structs for the cmd */
	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;

	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));

	/*
	 * For a PRLI, also check the copy of the command we sent: an accept
	 * there counts as success even if the response code does not.
	 */
	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
		    sizeof (prli_s));
		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
	    ptgt->tgt_d_id);

	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {

		mutex_enter(&ptgt->tgt_mutex);
		if (ptgt->tgt_pd_handle == NULL) {
			/*
			 * in a fabric environment the port device handles
			 * get created only after successful LOGIN into the
			 * transport, so the transport makes this port
			 * device (pd) handle available in this packet, so
			 * save it now
			 */
			ASSERT(fpkt->pkt_pd != NULL);
			ptgt->tgt_pd_handle = fpkt->pkt_pd;
		}
		mutex_exit(&ptgt->tgt_mutex);

		/* which ELS cmd is this response for ?? */
		switch (icmd->ipkt_opcode) {
		case LA_ELS_PLOGI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
			    ptgt->tgt_d_id,
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_15);

			/* Note that we are not allocating a new icmd */
			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause) != DDI_SUCCESS) {
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_16);
				goto fail;
			}
			break;

		case LA_ELS_PRLI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_17);

			prli = &prli_s;

			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
			    sizeof (prli_s));

			fprli = (struct fcp_prli *)prli->service_params;

			/* record the capabilities the target advertised */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_icap = fprli->initiator_fn;
			ptgt->tgt_tcap = fprli->target_fn;
			mutex_exit(&ptgt->tgt_mutex);

			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
				/*
				 * this FCP device does not support target mode
				 */
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_18);
				goto fail;
			}
			if (fprli->retry == 1) {
				fc_ulp_disable_relogin(pptr->port_fp_handle,
				    &ptgt->tgt_port_wwn);
			}

			/* target is no longer offline */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
				    FCP_TGT_MARK);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,1: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				goto fail;
			}
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			/*
			 * lun 0 should always respond to inquiry, so
			 * get the LUN struct for LUN 0
			 *
			 * Currently we deal with first level of addressing.
			 * If / when we start supporting 0x device types
			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
			 * this logic will need revisiting.
			 */
			lun0_newalloc = 0;
			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
				/*
				 * no LUN struct for LUN 0 yet exists,
				 * so create one
				 */
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to allocate lun 0 for"
					    " D_ID=%x", ptgt->tgt_d_id);
					goto fail;
				}
				lun0_newalloc = 1;
			}

			/* fill in LUN info */
			mutex_enter(&ptgt->tgt_mutex);
			/*
			 * consider lun 0 as device not connected if it is
			 * offlined or newly allocated
			 */
			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
			    lun0_newalloc) {
				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			ptgt->tgt_lun_cnt = 1;
			ptgt->tgt_report_lun_cnt = 0;
			mutex_exit(&ptgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
				    ->ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/* send Report Lun request to target */
			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
			    sizeof (struct fcp_reportlun_resp),
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to send REPORT LUN to"
					    " D_ID=%x", ptgt->tgt_d_id);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_icmd_callback,2:state change"
					    " occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&pptr->port_mutex);

				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_19);

				goto fail;
			} else {
				/*
				 * fcp_send_scsi() allocated its own ipkt;
				 * this one can be freed now.
				 */
				free_pkt = 0;
				fcp_icmd_free(pptr, icmd);
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp_icmd_callback Invalid opcode");
			goto fail;
		}

		return;
	}


	/*
	 * Other PLOGI failures are not retried as the
	 * transport does it already
	 */
	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {

			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
			    fpkt->pkt_reason);

			/*
			 * Retry by recalling the routine that
			 * originally queued this packet
			 */
			mutex_enter(&pptr->port_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				caddr_t msg;

				mutex_exit(&pptr->port_mutex);

				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);

				/* give a timed-out command a longer leash */
				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
					fpkt->pkt_timeout +=
					    FCP_TIMEOUT_DELTA;
				}

				rval = fc_ulp_issue_els(pptr->port_fp_handle,
				    fpkt);
				if (rval == FC_SUCCESS) {
					return;
				}

				/* transient state: park the pkt for later */
				if (rval == FC_STATEC_BUSY ||
				    rval == FC_OFFLINE) {
					fcp_queue_ipkt(pptr, fpkt);
					return;
				}
				(void) fc_ulp_error(rval, &msg);

				fcp_log(CE_NOTE, pptr->port_dip,
				    "!ELS 0x%x failed to d_id=0x%x;"
				    " %s", icmd->ipkt_opcode,
				    ptgt->tgt_d_id, msg);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,3: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&pptr->port_mutex);
			}
		}
	} else {
		/* failed PLOGI: only requeue if the transport insists */
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}
		}
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
			mutex_exit(&pptr->port_mutex);
			fcp_print_error(fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_icmd_callback,4: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
		}
	}

fail:
	/* unless ownership moved elsewhere, finish and free the icmd */
	if (free_pkt) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
	}
}
6503 6503
6504 6504
6505 6505 /*
6506 6506 * called internally to send an info cmd using the transport
6507 6507 *
6508 6508 * sends either an INQ or a REPORT_LUN
6509 6509 *
6510 6510 * when the packet is completed fcp_scsi_callback is called
6511 6511 */
6512 6512 static int
6513 6513 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6514 6514 int lcount, int tcount, int cause, uint32_t rscn_count)
6515 6515 {
6516 6516 int nodma;
6517 6517 struct fcp_ipkt *icmd;
6518 6518 struct fcp_tgt *ptgt;
6519 6519 struct fcp_port *pptr;
6520 6520 fc_frame_hdr_t *hp;
6521 6521 fc_packet_t *fpkt;
6522 6522 struct fcp_cmd fcp_cmd;
6523 6523 struct fcp_cmd *fcmd;
6524 6524 union scsi_cdb *scsi_cdb;
6525 6525
6526 6526 ASSERT(plun != NULL);
6527 6527
6528 6528 ptgt = plun->lun_tgt;
6529 6529 ASSERT(ptgt != NULL);
6530 6530
6531 6531 pptr = ptgt->tgt_port;
6532 6532 ASSERT(pptr != NULL);
6533 6533
6534 6534 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6535 6535 fcp_trace, FCP_BUF_LEVEL_5, 0,
6536 6536 "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6537 6537
6538 6538 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6539 6539 icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6540 6540 FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6541 6541 rscn_count);
6542 6542
6543 6543 if (icmd == NULL) {
6544 6544 return (DDI_FAILURE);
6545 6545 }
6546 6546
6547 6547 fpkt = icmd->ipkt_fpkt;
6548 6548 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6549 6549 icmd->ipkt_retries = 0;
6550 6550 icmd->ipkt_opcode = opcode;
6551 6551 icmd->ipkt_lun = plun;
6552 6552
6553 6553 if (nodma) {
6554 6554 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6555 6555 } else {
6556 6556 fcmd = &fcp_cmd;
6557 6557 }
6558 6558 bzero(fcmd, sizeof (struct fcp_cmd));
6559 6559
6560 6560 fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6561 6561
6562 6562 hp = &fpkt->pkt_cmd_fhdr;
6563 6563
6564 6564 hp->s_id = pptr->port_id;
6565 6565 hp->d_id = ptgt->tgt_d_id;
6566 6566 hp->r_ctl = R_CTL_COMMAND;
6567 6567 hp->type = FC_TYPE_SCSI_FCP;
6568 6568 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6569 6569 hp->rsvd = 0;
6570 6570 hp->seq_id = 0;
6571 6571 hp->seq_cnt = 0;
6572 6572 hp->ox_id = 0xffff;
6573 6573 hp->rx_id = 0xffff;
6574 6574 hp->ro = 0;
6575 6575
6576 6576 bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6577 6577
6578 6578 /*
6579 6579 * Request SCSI target for expedited processing
6580 6580 */
6581 6581
6582 6582 /*
6583 6583 * Set up for untagged queuing because we do not
6584 6584 * know if the fibre device supports queuing.
6585 6585 */
6586 6586 fcmd->fcp_cntl.cntl_reserved_0 = 0;
6587 6587 fcmd->fcp_cntl.cntl_reserved_1 = 0;
6588 6588 fcmd->fcp_cntl.cntl_reserved_2 = 0;
6589 6589 fcmd->fcp_cntl.cntl_reserved_3 = 0;
6590 6590 fcmd->fcp_cntl.cntl_reserved_4 = 0;
6591 6591 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6592 6592 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6593 6593
6594 6594 switch (opcode) {
6595 6595 case SCMD_INQUIRY_PAGE83:
6596 6596 /*
6597 6597 * Prepare to get the Inquiry VPD page 83 information
6598 6598 */
6599 6599 fcmd->fcp_cntl.cntl_read_data = 1;
6600 6600 fcmd->fcp_cntl.cntl_write_data = 0;
6601 6601 fcmd->fcp_data_len = alloc_len;
6602 6602
6603 6603 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6604 6604 fpkt->pkt_comp = fcp_scsi_callback;
6605 6605
6606 6606 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6607 6607 scsi_cdb->g0_addr2 = 0x01;
6608 6608 scsi_cdb->g0_addr1 = 0x83;
6609 6609 scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6610 6610 break;
6611 6611
6612 6612 case SCMD_INQUIRY:
6613 6613 fcmd->fcp_cntl.cntl_read_data = 1;
6614 6614 fcmd->fcp_cntl.cntl_write_data = 0;
6615 6615 fcmd->fcp_data_len = alloc_len;
6616 6616
6617 6617 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6618 6618 fpkt->pkt_comp = fcp_scsi_callback;
6619 6619
6620 6620 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6621 6621 scsi_cdb->g0_count0 = SUN_INQSIZE;
6622 6622 break;
6623 6623
6624 6624 case SCMD_REPORT_LUN: {
6625 6625 fc_portid_t d_id;
6626 6626 opaque_t fca_dev;
6627 6627
6628 6628 ASSERT(alloc_len >= 16);
6629 6629
6630 6630 d_id.priv_lilp_posit = 0;
6631 6631 d_id.port_id = ptgt->tgt_d_id;
6632 6632
6633 6633 fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6634 6634
6635 6635 mutex_enter(&ptgt->tgt_mutex);
6636 6636 ptgt->tgt_fca_dev = fca_dev;
6637 6637 mutex_exit(&ptgt->tgt_mutex);
6638 6638
6639 6639 fcmd->fcp_cntl.cntl_read_data = 1;
6640 6640 fcmd->fcp_cntl.cntl_write_data = 0;
6641 6641 fcmd->fcp_data_len = alloc_len;
6642 6642
6643 6643 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6644 6644 fpkt->pkt_comp = fcp_scsi_callback;
6645 6645
6646 6646 scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6647 6647 scsi_cdb->scc5_count0 = alloc_len & 0xff;
6648 6648 scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6649 6649 scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6650 6650 scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6651 6651 break;
6652 6652 }
6653 6653
6654 6654 default:
6655 6655 fcp_log(CE_WARN, pptr->port_dip,
6656 6656 "!fcp_send_scsi Invalid opcode");
6657 6657 break;
6658 6658 }
6659 6659
6660 6660 if (!nodma) {
6661 6661 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6662 6662 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6663 6663 }
6664 6664
6665 6665 mutex_enter(&pptr->port_mutex);
6666 6666 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6667 6667
6668 6668 mutex_exit(&pptr->port_mutex);
6669 6669 if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6670 6670 FC_SUCCESS) {
6671 6671 fcp_icmd_free(pptr, icmd);
6672 6672 return (DDI_FAILURE);
6673 6673 }
6674 6674 return (DDI_SUCCESS);
6675 6675 } else {
6676 6676 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6677 6677 fcp_trace, FCP_BUF_LEVEL_2, 0,
6678 6678 "fcp_send_scsi,1: state change occured"
6679 6679 " for D_ID=0x%x", ptgt->tgt_d_id);
6680 6680 mutex_exit(&pptr->port_mutex);
6681 6681 fcp_icmd_free(pptr, icmd);
6682 6682 return (DDI_FAILURE);
6683 6683 }
6684 6684 }
6685 6685
6686 6686
6687 6687 /*
6688 6688 * called by fcp_scsi_callback to check to handle the case where
6689 6689 * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6690 6690 */
6691 6691 static int
6692 6692 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6693 6693 {
6694 6694 uchar_t rqlen;
6695 6695 int rval = DDI_FAILURE;
6696 6696 struct scsi_extended_sense sense_info, *sense;
6697 6697 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6698 6698 fpkt->pkt_ulp_private;
6699 6699 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
6700 6700 struct fcp_port *pptr = ptgt->tgt_port;
6701 6701
6702 6702 ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6703 6703
6704 6704 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6705 6705 /*
6706 6706 * SCSI-II Reserve Release support. Some older FC drives return
6707 6707 * Reservation conflict for Report Luns command.
6708 6708 */
6709 6709 if (icmd->ipkt_nodma) {
6710 6710 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6711 6711 rsp->fcp_u.fcp_status.sense_len_set = 0;
6712 6712 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6713 6713 } else {
6714 6714 fcp_rsp_t new_resp;
6715 6715
6716 6716 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6717 6717 fpkt->pkt_resp_acc, sizeof (new_resp));
6718 6718
6719 6719 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6720 6720 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6721 6721 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6722 6722
6723 6723 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6724 6724 fpkt->pkt_resp_acc, sizeof (new_resp));
6725 6725 }
6726 6726
6727 6727 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6728 6728 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6729 6729
6730 6730 return (DDI_SUCCESS);
6731 6731 }
6732 6732
6733 6733 sense = &sense_info;
6734 6734 if (!rsp->fcp_u.fcp_status.sense_len_set) {
6735 6735 /* no need to continue if sense length is not set */
6736 6736 return (rval);
6737 6737 }
6738 6738
6739 6739 /* casting 64-bit integer to 8-bit */
6740 6740 rqlen = (uchar_t)min(rsp->fcp_sense_len,
6741 6741 sizeof (struct scsi_extended_sense));
6742 6742
6743 6743 if (rqlen < 14) {
6744 6744 /* no need to continue if request length isn't long enough */
6745 6745 return (rval);
6746 6746 }
6747 6747
6748 6748 if (icmd->ipkt_nodma) {
6749 6749 /*
6750 6750 * We can safely use fcp_response_len here since the
6751 6751 * only path that calls fcp_check_reportlun,
6752 6752 * fcp_scsi_callback, has already called
6753 6753 * fcp_validate_fcp_response.
6754 6754 */
6755 6755 sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6756 6756 sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6757 6757 } else {
6758 6758 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6759 6759 rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6760 6760 sizeof (struct scsi_extended_sense));
6761 6761 }
6762 6762
6763 6763 if (!FCP_SENSE_NO_LUN(sense)) {
6764 6764 mutex_enter(&ptgt->tgt_mutex);
6765 6765 /* clear the flag if any */
6766 6766 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6767 6767 mutex_exit(&ptgt->tgt_mutex);
6768 6768 }
6769 6769
6770 6770 if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6771 6771 (sense->es_add_code == 0x20)) {
6772 6772 if (icmd->ipkt_nodma) {
6773 6773 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6774 6774 rsp->fcp_u.fcp_status.sense_len_set = 0;
6775 6775 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6776 6776 } else {
6777 6777 fcp_rsp_t new_resp;
6778 6778
6779 6779 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6780 6780 fpkt->pkt_resp_acc, sizeof (new_resp));
6781 6781
6782 6782 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6783 6783 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6784 6784 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6785 6785
6786 6786 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6787 6787 fpkt->pkt_resp_acc, sizeof (new_resp));
6788 6788 }
6789 6789
6790 6790 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6791 6791 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6792 6792
6793 6793 return (DDI_SUCCESS);
6794 6794 }
6795 6795
6796 6796 /*
6797 6797 * This is for the STK library which returns a check condition,
6798 6798 * to indicate device is not ready, manual assistance needed.
6799 6799 * This is to a report lun command when the door is open.
6800 6800 */
6801 6801 if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6802 6802 if (icmd->ipkt_nodma) {
6803 6803 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6804 6804 rsp->fcp_u.fcp_status.sense_len_set = 0;
6805 6805 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6806 6806 } else {
6807 6807 fcp_rsp_t new_resp;
6808 6808
6809 6809 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6810 6810 fpkt->pkt_resp_acc, sizeof (new_resp));
6811 6811
6812 6812 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6813 6813 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6814 6814 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6815 6815
6816 6816 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6817 6817 fpkt->pkt_resp_acc, sizeof (new_resp));
6818 6818 }
6819 6819
6820 6820 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6821 6821 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6822 6822
6823 6823 return (DDI_SUCCESS);
6824 6824 }
6825 6825
6826 6826 if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6827 6827 (FCP_SENSE_NO_LUN(sense))) {
6828 6828 mutex_enter(&ptgt->tgt_mutex);
6829 6829 if ((FCP_SENSE_NO_LUN(sense)) &&
6830 6830 (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6831 6831 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6832 6832 mutex_exit(&ptgt->tgt_mutex);
6833 6833 /*
6834 6834 * reconfig was triggred by ILLEGAL REQUEST but
6835 6835 * got ILLEGAL REQUEST again
6836 6836 */
6837 6837 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6838 6838 fcp_trace, FCP_BUF_LEVEL_3, 0,
6839 6839 "!FCP: Unable to obtain Report Lun data"
6840 6840 " target=%x", ptgt->tgt_d_id);
6841 6841 } else {
6842 6842 if (ptgt->tgt_tid == NULL) {
6843 6843 timeout_id_t tid;
6844 6844 /*
6845 6845 * REPORT LUN data has changed. Kick off
6846 6846 * rediscovery
6847 6847 */
6848 6848 tid = timeout(fcp_reconfigure_luns,
6849 6849 (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6850 6850
6851 6851 ptgt->tgt_tid = tid;
6852 6852 ptgt->tgt_state |= FCP_TGT_BUSY;
6853 6853 }
6854 6854 if (FCP_SENSE_NO_LUN(sense)) {
6855 6855 ptgt->tgt_state |= FCP_TGT_ILLREQ;
6856 6856 }
6857 6857 mutex_exit(&ptgt->tgt_mutex);
6858 6858 if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6859 6859 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6860 6860 fcp_trace, FCP_BUF_LEVEL_3, 0,
6861 6861 "!FCP:Report Lun Has Changed"
6862 6862 " target=%x", ptgt->tgt_d_id);
6863 6863 } else if (FCP_SENSE_NO_LUN(sense)) {
6864 6864 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6865 6865 fcp_trace, FCP_BUF_LEVEL_3, 0,
6866 6866 "!FCP:LU Not Supported"
6867 6867 " target=%x", ptgt->tgt_d_id);
6868 6868 }
6869 6869 }
6870 6870 rval = DDI_SUCCESS;
6871 6871 }
6872 6872
6873 6873 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6874 6874 fcp_trace, FCP_BUF_LEVEL_5, 0,
6875 6875 "D_ID=%x, sense=%x, status=%x",
6876 6876 fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6877 6877 rsp->fcp_u.fcp_status.scsi_status);
6878 6878
6879 6879 return (rval);
6880 6880 }
6881 6881
6882 6882 /*
6883 6883 * Function: fcp_scsi_callback
6884 6884 *
6885 6885 * Description: This is the callback routine set by fcp_send_scsi() after
6886 6886 * it calls fcp_icmd_alloc(). The SCSI command completed here
6887 6887 * and autogenerated by FCP are: REPORT_LUN, INQUIRY and
6888 6888 * INQUIRY_PAGE83.
6889 6889 *
6890 6890 * Argument: *fpkt FC packet used to convey the command
6891 6891 *
6892 6892 * Return Value: None
6893 6893 */
6894 6894 static void
6895 6895 fcp_scsi_callback(fc_packet_t *fpkt)
6896 6896 {
6897 6897 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6898 6898 fpkt->pkt_ulp_private;
6899 6899 struct fcp_rsp_info fcp_rsp_err, *bep;
6900 6900 struct fcp_port *pptr;
6901 6901 struct fcp_tgt *ptgt;
6902 6902 struct fcp_lun *plun;
6903 6903 struct fcp_rsp response, *rsp;
6904 6904
6905 6905 ptgt = icmd->ipkt_tgt;
6906 6906 pptr = ptgt->tgt_port;
6907 6907 plun = icmd->ipkt_lun;
6908 6908
6909 6909 if (icmd->ipkt_nodma) {
6910 6910 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6911 6911 } else {
6912 6912 rsp = &response;
6913 6913 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6914 6914 sizeof (struct fcp_rsp));
6915 6915 }
6916 6916
6917 6917 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6918 6918 fcp_trace, FCP_BUF_LEVEL_2, 0,
6919 6919 "SCSI callback state=0x%x for %x, op_code=0x%x, "
6920 6920 "status=%x, lun num=%x",
6921 6921 fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6922 6922 rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6923 6923
6924 6924 /*
6925 6925 * Pre-init LUN GUID with NWWN if it is not a device that
6926 6926 * supports multiple luns and we know it's not page83
6927 6927 * compliant. Although using a NWWN is not lun unique,
6928 6928 * we will be fine since there is only one lun behind the taget
6929 6929 * in this case.
6930 6930 */
6931 6931 if ((plun->lun_guid_size == 0) &&
6932 6932 (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6933 6933 (fcp_symmetric_device_probe(plun) == 0)) {
6934 6934
6935 6935 char ascii_wwn[FC_WWN_SIZE*2+1];
6936 6936 fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6937 6937 (void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6938 6938 }
6939 6939
6940 6940 /*
6941 6941 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6942 6942 * when thay have more data than what is asked in CDB. An overrun
6943 6943 * is really when FCP_DL is smaller than the data length in CDB.
6944 6944 * In the case here we know that REPORT LUN command we formed within
6945 6945 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
6946 6946 * behavior. In reality this is FC_SUCCESS.
6947 6947 */
6948 6948 if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6949 6949 (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6950 6950 (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6951 6951 fpkt->pkt_state = FC_PKT_SUCCESS;
6952 6952 }
6953 6953
6954 6954 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6955 6955 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6956 6956 fcp_trace, FCP_BUF_LEVEL_2, 0,
6957 6957 "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6958 6958 ptgt->tgt_d_id);
6959 6959
6960 6960 if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6961 6961 /*
6962 6962 * Inquiry VPD page command on A5K SES devices would
6963 6963 * result in data CRC errors.
6964 6964 */
6965 6965 if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6966 6966 (void) fcp_handle_page83(fpkt, icmd, 1);
6967 6967 return;
6968 6968 }
6969 6969 }
6970 6970 if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6971 6971 FCP_MUST_RETRY(fpkt)) {
6972 6972 fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6973 6973 fcp_retry_scsi_cmd(fpkt);
6974 6974 return;
6975 6975 }
6976 6976
6977 6977 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6978 6978 FCP_TGT_TRACE_20);
6979 6979
6980 6980 mutex_enter(&pptr->port_mutex);
6981 6981 mutex_enter(&ptgt->tgt_mutex);
6982 6982 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6983 6983 mutex_exit(&ptgt->tgt_mutex);
6984 6984 mutex_exit(&pptr->port_mutex);
6985 6985 fcp_print_error(fpkt);
6986 6986 } else {
6987 6987 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6988 6988 fcp_trace, FCP_BUF_LEVEL_2, 0,
6989 6989 "fcp_scsi_callback,1: state change occured"
6990 6990 " for D_ID=0x%x", ptgt->tgt_d_id);
6991 6991 mutex_exit(&ptgt->tgt_mutex);
6992 6992 mutex_exit(&pptr->port_mutex);
6993 6993 }
6994 6994 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6995 6995 icmd->ipkt_change_cnt, icmd->ipkt_cause);
6996 6996 fcp_icmd_free(pptr, icmd);
6997 6997 return;
6998 6998 }
6999 6999
7000 7000 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
7001 7001
7002 7002 mutex_enter(&pptr->port_mutex);
7003 7003 mutex_enter(&ptgt->tgt_mutex);
7004 7004 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7005 7005 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7006 7006 fcp_trace, FCP_BUF_LEVEL_2, 0,
7007 7007 "fcp_scsi_callback,2: state change occured"
7008 7008 " for D_ID=0x%x", ptgt->tgt_d_id);
7009 7009 mutex_exit(&ptgt->tgt_mutex);
7010 7010 mutex_exit(&pptr->port_mutex);
7011 7011 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7012 7012 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7013 7013 fcp_icmd_free(pptr, icmd);
7014 7014 return;
7015 7015 }
7016 7016 ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7017 7017
7018 7018 mutex_exit(&ptgt->tgt_mutex);
7019 7019 mutex_exit(&pptr->port_mutex);
7020 7020
7021 7021 if (icmd->ipkt_nodma) {
7022 7022 bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
7023 7023 sizeof (struct fcp_rsp));
7024 7024 } else {
7025 7025 bep = &fcp_rsp_err;
7026 7026 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
7027 7027 fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
7028 7028 }
7029 7029
7030 7030 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
7031 7031 fcp_retry_scsi_cmd(fpkt);
7032 7032 return;
7033 7033 }
7034 7034
7035 7035 if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
7036 7036 FCP_NO_FAILURE) {
7037 7037 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7038 7038 fcp_trace, FCP_BUF_LEVEL_2, 0,
7039 7039 "rsp_code=0x%x, rsp_len_set=0x%x",
7040 7040 bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
7041 7041 fcp_retry_scsi_cmd(fpkt);
7042 7042 return;
7043 7043 }
7044 7044
7045 7045 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
7046 7046 rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
7047 7047 fcp_queue_ipkt(pptr, fpkt);
7048 7048 return;
7049 7049 }
7050 7050
7051 7051 /*
7052 7052 * Devices that do not support INQUIRY_PAGE83, return check condition
7053 7053 * with illegal request as per SCSI spec.
7054 7054 * Crossbridge is one such device and Daktari's SES node is another.
7055 7055 * We want to ideally enumerate these devices as a non-mpxio devices.
7056 7056 * SES nodes (Daktari only currently) are an exception to this.
7057 7057 */
7058 7058 if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
7059 7059 (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
7060 7060
7061 7061 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7062 7062 fcp_trace, FCP_BUF_LEVEL_3, 0,
7063 7063 "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
7064 7064 "check condition. May enumerate as non-mpxio device",
7065 7065 ptgt->tgt_d_id, plun->lun_type);
7066 7066
7067 7067 /*
7068 7068 * If we let Daktari's SES be enumerated as a non-mpxio
7069 7069 * device, there will be a discrepency in that the other
7070 7070 * internal FC disks will get enumerated as mpxio devices.
7071 7071 * Applications like luxadm expect this to be consistent.
7072 7072 *
7073 7073 * So, we put in a hack here to check if this is an SES device
7074 7074 * and handle it here.
7075 7075 */
7076 7076 if (plun->lun_type == DTYPE_ESI) {
7077 7077 /*
7078 7078 * Since, pkt_state is actually FC_PKT_SUCCESS
7079 7079 * at this stage, we fake a failure here so that
7080 7080 * fcp_handle_page83 will create a device path using
7081 7081 * the WWN instead of the GUID which is not there anyway
7082 7082 */
7083 7083 fpkt->pkt_state = FC_PKT_LOCAL_RJT;
7084 7084 (void) fcp_handle_page83(fpkt, icmd, 1);
7085 7085 return;
7086 7086 }
7087 7087
7088 7088 mutex_enter(&ptgt->tgt_mutex);
7089 7089 plun->lun_state &= ~(FCP_LUN_OFFLINE |
7090 7090 FCP_LUN_MARK | FCP_LUN_BUSY);
7091 7091 mutex_exit(&ptgt->tgt_mutex);
7092 7092
7093 7093 (void) fcp_call_finish_init(pptr, ptgt,
7094 7094 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7095 7095 icmd->ipkt_cause);
7096 7096 fcp_icmd_free(pptr, icmd);
7097 7097 return;
7098 7098 }
7099 7099
7100 7100 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7101 7101 int rval = DDI_FAILURE;
7102 7102
7103 7103 /*
7104 7104 * handle cases where report lun isn't supported
7105 7105 * by faking up our own REPORT_LUN response or
7106 7106 * UNIT ATTENTION
7107 7107 */
7108 7108 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7109 7109 rval = fcp_check_reportlun(rsp, fpkt);
7110 7110
7111 7111 /*
7112 7112 * fcp_check_reportlun might have modified the
7113 7113 * FCP response. Copy it in again to get an updated
7114 7114 * FCP response
7115 7115 */
7116 7116 if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
7117 7117 rsp = &response;
7118 7118
7119 7119 FCP_CP_IN(fpkt->pkt_resp, rsp,
7120 7120 fpkt->pkt_resp_acc,
7121 7121 sizeof (struct fcp_rsp));
7122 7122 }
7123 7123 }
7124 7124
7125 7125 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7126 7126 if (rval == DDI_SUCCESS) {
7127 7127 (void) fcp_call_finish_init(pptr, ptgt,
7128 7128 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7129 7129 icmd->ipkt_cause);
7130 7130 fcp_icmd_free(pptr, icmd);
7131 7131 } else {
7132 7132 fcp_retry_scsi_cmd(fpkt);
7133 7133 }
7134 7134
7135 7135 return;
7136 7136 }
7137 7137 } else {
7138 7138 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7139 7139 mutex_enter(&ptgt->tgt_mutex);
7140 7140 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7141 7141 mutex_exit(&ptgt->tgt_mutex);
7142 7142 }
7143 7143 }
7144 7144
7145 7145 ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7146 7146 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
7147 7147 (void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
7148 7148 DDI_DMA_SYNC_FORCPU);
7149 7149 }
7150 7150
7151 7151 switch (icmd->ipkt_opcode) {
7152 7152 case SCMD_INQUIRY:
7153 7153 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7154 7154 fcp_handle_inquiry(fpkt, icmd);
7155 7155 break;
7156 7156
7157 7157 case SCMD_REPORT_LUN:
7158 7158 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7159 7159 FCP_TGT_TRACE_22);
7160 7160 fcp_handle_reportlun(fpkt, icmd);
7161 7161 break;
7162 7162
7163 7163 case SCMD_INQUIRY_PAGE83:
7164 7164 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7165 7165 (void) fcp_handle_page83(fpkt, icmd, 0);
7166 7166 break;
7167 7167
7168 7168 default:
7169 7169 fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7170 7170 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7171 7171 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7172 7172 fcp_icmd_free(pptr, icmd);
7173 7173 break;
7174 7174 }
7175 7175 }
7176 7176
7177 7177
7178 7178 static void
7179 7179 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7180 7180 {
7181 7181 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
7182 7182 fpkt->pkt_ulp_private;
7183 7183 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
7184 7184 struct fcp_port *pptr = ptgt->tgt_port;
7185 7185
7186 7186 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7187 7187 fcp_is_retryable(icmd)) {
7188 7188 mutex_enter(&pptr->port_mutex);
7189 7189 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7190 7190 mutex_exit(&pptr->port_mutex);
7191 7191 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7192 7192 fcp_trace, FCP_BUF_LEVEL_3, 0,
7193 7193 "Retrying %s to %x; state=%x, reason=%x",
7194 7194 (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7195 7195 "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7196 7196 fpkt->pkt_state, fpkt->pkt_reason);
7197 7197
7198 7198 fcp_queue_ipkt(pptr, fpkt);
7199 7199 } else {
7200 7200 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7201 7201 fcp_trace, FCP_BUF_LEVEL_3, 0,
7202 7202 "fcp_retry_scsi_cmd,1: state change occured"
7203 7203 " for D_ID=0x%x", ptgt->tgt_d_id);
7204 7204 mutex_exit(&pptr->port_mutex);
7205 7205 (void) fcp_call_finish_init(pptr, ptgt,
7206 7206 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7207 7207 icmd->ipkt_cause);
7208 7208 fcp_icmd_free(pptr, icmd);
7209 7209 }
7210 7210 } else {
7211 7211 fcp_print_error(fpkt);
7212 7212 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7213 7213 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7214 7214 fcp_icmd_free(pptr, icmd);
7215 7215 }
7216 7216 }
7217 7217
7218 7218 /*
7219 7219 * Function: fcp_handle_page83
7220 7220 *
7221 7221 * Description: Treats the response to INQUIRY_PAGE83.
7222 7222 *
7223 7223 * Argument: *fpkt FC packet used to convey the command.
7224 7224 * *icmd Original fcp_ipkt structure.
7225 7225 * ignore_page83_data
7226 7226 * if it's 1, that means it's a special devices's
7227 7227 * page83 response, it should be enumerated under mpxio
7228 7228 *
7229 7229 * Return Value: None
7230 7230 */
7231 7231 static void
7232 7232 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7233 7233 int ignore_page83_data)
7234 7234 {
7235 7235 struct fcp_port *pptr;
7236 7236 struct fcp_lun *plun;
7237 7237 struct fcp_tgt *ptgt;
7238 7238 uchar_t dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7239 7239 int fail = 0;
7240 7240 ddi_devid_t devid;
7241 7241 char *guid = NULL;
7242 7242 int ret;
7243 7243
7244 7244 ASSERT(icmd != NULL && fpkt != NULL);
7245 7245
7246 7246 pptr = icmd->ipkt_port;
7247 7247 ptgt = icmd->ipkt_tgt;
7248 7248 plun = icmd->ipkt_lun;
7249 7249
7250 7250 if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7251 7251 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7252 7252
7253 7253 FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7254 7254 SCMD_MAX_INQUIRY_PAGE83_SIZE);
7255 7255
7256 7256 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7257 7257 fcp_trace, FCP_BUF_LEVEL_5, 0,
7258 7258 "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7259 7259 "dtype=0x%x, lun num=%x",
7260 7260 pptr->port_instance, ptgt->tgt_d_id,
7261 7261 dev_id_page[0], plun->lun_num);
7262 7262
7263 7263 ret = ddi_devid_scsi_encode(
7264 7264 DEVID_SCSI_ENCODE_VERSION_LATEST,
7265 7265 NULL, /* driver name */
7266 7266 (unsigned char *) &plun->lun_inq, /* standard inquiry */
7267 7267 sizeof (plun->lun_inq), /* size of standard inquiry */
7268 7268 NULL, /* page 80 data */
7269 7269 0, /* page 80 len */
7270 7270 dev_id_page, /* page 83 data */
7271 7271 SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7272 7272 &devid);
7273 7273
7274 7274 if (ret == DDI_SUCCESS) {
7275 7275
7276 7276 guid = ddi_devid_to_guid(devid);
7277 7277
7278 7278 if (guid) {
7279 7279 /*
7280 7280 * Check our current guid. If it's non null
7281 7281 * and it has changed, we need to copy it into
7282 7282 * lun_old_guid since we might still need it.
7283 7283 */
7284 7284 if (plun->lun_guid &&
7285 7285 strcmp(guid, plun->lun_guid)) {
7286 7286 unsigned int len;
7287 7287
7288 7288 /*
7289 7289 * If the guid of the LUN changes,
7290 7290 * reconfiguration should be triggered
7291 7291 * to reflect the changes.
7292 7292 * i.e. we should offline the LUN with
7293 7293 * the old guid, and online the LUN with
7294 7294 * the new guid.
7295 7295 */
7296 7296 plun->lun_state |= FCP_LUN_CHANGED;
7297 7297
7298 7298 if (plun->lun_old_guid) {
7299 7299 kmem_free(plun->lun_old_guid,
7300 7300 plun->lun_old_guid_size);
7301 7301 }
7302 7302
7303 7303 len = plun->lun_guid_size;
7304 7304 plun->lun_old_guid_size = len;
7305 7305
7306 7306 plun->lun_old_guid = kmem_zalloc(len,
7307 7307 KM_NOSLEEP);
7308 7308
7309 7309 if (plun->lun_old_guid) {
7310 7310 /*
7311 7311 * The alloc was successful then
7312 7312 * let's do the copy.
7313 7313 */
7314 7314 bcopy(plun->lun_guid,
7315 7315 plun->lun_old_guid, len);
7316 7316 } else {
7317 7317 fail = 1;
7318 7318 plun->lun_old_guid_size = 0;
7319 7319 }
7320 7320 }
7321 7321 if (!fail) {
7322 7322 if (fcp_copy_guid_2_lun_block(
7323 7323 plun, guid)) {
7324 7324 fail = 1;
7325 7325 }
7326 7326 }
7327 7327 ddi_devid_free_guid(guid);
7328 7328
7329 7329 } else {
7330 7330 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7331 7331 fcp_trace, FCP_BUF_LEVEL_2, 0,
7332 7332 "fcp_handle_page83: unable to create "
7333 7333 "GUID");
7334 7334
7335 7335 /* couldn't create good guid from devid */
7336 7336 fail = 1;
7337 7337 }
7338 7338 ddi_devid_free(devid);
7339 7339
7340 7340 } else if (ret == DDI_NOT_WELL_FORMED) {
7341 7341 /* NULL filled data for page 83 */
7342 7342 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7343 7343 fcp_trace, FCP_BUF_LEVEL_2, 0,
7344 7344 "fcp_handle_page83: retry GUID");
7345 7345
7346 7346 icmd->ipkt_retries = 0;
7347 7347 fcp_retry_scsi_cmd(fpkt);
7348 7348 return;
7349 7349 } else {
7350 7350 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7351 7351 fcp_trace, FCP_BUF_LEVEL_2, 0,
7352 7352 "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7353 7353 ret);
7354 7354 /*
7355 7355 * Since the page83 validation
7356 7356 * introduced late, we are being
7357 7357 * tolerant to the existing devices
7358 7358 * that already found to be working
7359 7359 * under mpxio, like A5200's SES device,
7360 7360 * its page83 response will not be standard-compliant,
7361 7361 * but we still want it to be enumerated under mpxio.
7362 7362 */
7363 7363 if (fcp_symmetric_device_probe(plun) != 0) {
7364 7364 fail = 1;
7365 7365 }
7366 7366 }
7367 7367
7368 7368 } else {
7369 7369 /* bad packet state */
7370 7370 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7371 7371
7372 7372 /*
7373 7373 * For some special devices (A5K SES and Daktari's SES devices),
7374 7374 * they should be enumerated under mpxio
7375 7375 * or "luxadm dis" will fail
7376 7376 */
7377 7377 if (ignore_page83_data) {
7378 7378 fail = 0;
7379 7379 } else {
7380 7380 fail = 1;
7381 7381 }
7382 7382 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7383 7383 fcp_trace, FCP_BUF_LEVEL_2, 0,
7384 7384 "!Devid page cmd failed. "
7385 7385 "fpkt_state: %x fpkt_reason: %x",
7386 7386 "ignore_page83: %d",
7387 7387 fpkt->pkt_state, fpkt->pkt_reason,
7388 7388 ignore_page83_data);
7389 7389 }
7390 7390
7391 7391 mutex_enter(&pptr->port_mutex);
7392 7392 mutex_enter(&plun->lun_mutex);
7393 7393 /*
7394 7394 * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid
7395 7395 * mismatch between lun_cip and lun_mpxio.
7396 7396 */
7397 7397 if (plun->lun_cip == NULL) {
7398 7398 /*
7399 7399 * If we don't have a guid for this lun it's because we were
7400 7400 * unable to glean one from the page 83 response. Set the
7401 7401 * control flag to 0 here to make sure that we don't attempt to
7402 7402 * enumerate it under mpxio.
7403 7403 */
7404 7404 if (fail || pptr->port_mpxio == 0) {
7405 7405 plun->lun_mpxio = 0;
7406 7406 } else {
7407 7407 plun->lun_mpxio = 1;
7408 7408 }
7409 7409 }
7410 7410 mutex_exit(&plun->lun_mutex);
7411 7411 mutex_exit(&pptr->port_mutex);
7412 7412
7413 7413 mutex_enter(&ptgt->tgt_mutex);
7414 7414 plun->lun_state &=
7415 7415 ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7416 7416 mutex_exit(&ptgt->tgt_mutex);
7417 7417
7418 7418 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7419 7419 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7420 7420
7421 7421 fcp_icmd_free(pptr, icmd);
7422 7422 }
7423 7423
7424 7424 /*
7425 7425 * Function: fcp_handle_inquiry
7426 7426 *
7427 7427 * Description: Called by fcp_scsi_callback to handle the response to an
7428 7428 * INQUIRY request.
7429 7429 *
7430 7430 * Argument: *fpkt FC packet used to convey the command.
7431 7431 * *icmd Original fcp_ipkt structure.
7432 7432 *
7433 7433 * Return Value: None
7434 7434 */
static void
fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	struct fcp_port	*pptr;
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;
	uchar_t		dtype;
	uchar_t		pqual;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;

	ASSERT(icmd != NULL && fpkt != NULL);

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;

	/* Copy the INQUIRY response out of the packet's DMA buffer. */
	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
	    sizeof (struct scsi_inquiry));

	/*
	 * First byte of INQUIRY data: bits 0-4 are the peripheral device
	 * type, bits 5-7 the peripheral qualifier.
	 */
	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
	pqual = plun->lun_inq.inq_dtype >> 5;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
	    plun->lun_num, dtype, pqual);

	if (pqual != 0) {
		/*
		 * Non-zero peripheral qualifier: the LUN is not (or not
		 * currently) connected.  Log it, count this LUN as done for
		 * discovery accounting, free the internal packet, and stop
		 * probing this LUN.
		 */
		fcp_log(CE_CONT, pptr->port_dip,
		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
		    "Device type=0x%x Peripheral qual=0x%x\n",
		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
		    "Device type=0x%x Peripheral qual=0x%x\n",
		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);

		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);

		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * If the device is already initialized, check the dtype
	 * for a change. If it has changed then update the flags
	 * so the create_luns will offline the old device and
	 * create the new device. Refer to bug: 4764752
	 */
	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
		plun->lun_state |= FCP_LUN_CHANGED;
	}
	plun->lun_type = plun->lun_inq.inq_dtype;

	/*
	 * This code is setting/initializing the throttling in the FCA
	 * driver.  It is a one-shot per port (guarded by port_notify) and
	 * only fires for devices whose product id matches the file-scope
	 * string "pid" (defined elsewhere in this file -- presumably a
	 * specific vendor/product; verify against the definition).
	 */
	mutex_enter(&pptr->port_mutex);
	if (!pptr->port_notify) {
		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
			uint32_t cmd = 0;
			/*
			 * NOTE(review): ">>" binds tighter than "&", so
			 * "cmd & 0xFFFFFF00 >> 8" parses as
			 * "cmd & (0xFFFFFF00 >> 8)".  Because cmd is 0,
			 * the whole expression still reduces to
			 * FC_NOTIFY_THROTTLE | (FCP_SVE_THROTTLE << 8),
			 * which appears to be the intent -- confirm.
			 */
			cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
			    ((cmd & 0xFFFFFF00 >> 8) |
			    FCP_SVE_THROTTLE << 8));
			pptr->port_notify = 1;
			/* Drop the port mutex across the upcall into fp. */
			mutex_exit(&pptr->port_mutex);
			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
			mutex_enter(&pptr->port_mutex);
		}
	}

	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
		/*
		 * A link reset or target change raced with this command;
		 * the response is stale -- account for the LUN and drop it.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_handle_inquiry,1:state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&pptr->port_mutex);

		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
	mutex_exit(&pptr->port_mutex);

	/* Retrieve the rscn count (if a valid one exists) */
	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
		rscn_count = ((fc_ulp_rscn_info_t *)
		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
	} else {
		rscn_count = FC_INVALID_RSCN_COUNT;
	}

	/*
	 * Move discovery forward: read INQUIRY VPD page 0x83 to uniquely
	 * identify this logical unit.  On send failure, account for this
	 * LUN now so overall discovery can still complete.
	 */
	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
		fcp_log(CE_WARN, NULL, "!failed to send page 83");
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
	}

	/*
	 * The original internal packet is done in either case; the page-83
	 * request (if sent) carries its own fcp_ipkt.
	 */
	fcp_icmd_free(pptr, icmd);
}
7557 7557
7558 7558 /*
7559 7559 * Function: fcp_handle_reportlun
7560 7560 *
7561 7561 * Description: Called by fcp_scsi_callback to handle the response to a
7562 7562 * REPORT_LUN request.
7563 7563 *
7564 7564 * Argument: *fpkt FC packet used to convey the command.
7565 7565 * *icmd Original fcp_ipkt structure.
7566 7566 *
7567 7567 * Return Value: None
7568 7568 */
static void
fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	int				i;
	int				nluns_claimed;
	int				nluns_bufmax;
	int				len;
	uint16_t			lun_num;
	uint32_t			rscn_count = FC_INVALID_RSCN_COUNT;
	struct fcp_port			*pptr;
	struct fcp_tgt			*ptgt;
	struct fcp_lun			*plun;
	struct fcp_reportlun_resp	*report_lun;

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	len = fpkt->pkt_datalen;

	/*
	 * Reject a response too short to carry even the REPORT_LUNS header,
	 * and bail if we cannot allocate a buffer to copy it into.  Either
	 * way the target's discovery accounting must still be advanced.
	 */
	if ((len < FCP_LUN_HEADER) ||
	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/* Copy the REPORT_LUNS payload out of the packet's DMA buffer. */
	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
	    pptr->port_instance, ptgt->tgt_d_id);

	/*
	 * Get the number of luns (which is supplied as LUNS * 8) the
	 * device claims it has.
	 */
	nluns_claimed = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Get the maximum number of luns the buffer submitted can hold.
	 */
	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;

	/*
	 * Due to limitations of certain hardware, we support only 16 bit LUNs
	 */
	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
		kmem_free(report_lun, len);

		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
		    " 0x%x number of LUNs for target=%x", nluns_claimed,
		    ptgt->tgt_d_id);

		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * If there are more LUNs than we have allocated memory for,
	 * allocate more space and send down yet another report lun if
	 * the maximum number of attempts hasn't been reached.
	 */
	mutex_enter(&ptgt->tgt_mutex);

	if ((nluns_claimed > nluns_bufmax) &&
	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {

		/* NOTE: this declaration shadows the function-scope plun. */
		struct fcp_lun	*plun;

		ptgt->tgt_report_lun_cnt++;
		plun = ptgt->tgt_lun;
		ASSERT(plun != NULL);
		mutex_exit(&ptgt->tgt_mutex);

		kmem_free(report_lun, len);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Dynamically discovered %d LUNs for D_ID=%x",
		    nluns_claimed, ptgt->tgt_d_id);

		/* Retrieve the rscn count (if a valid one exists) */
		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
			rscn_count = ((fc_ulp_rscn_info_t *)
			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
			    ulp_rscn_count;
		} else {
			rscn_count = FC_INVALID_RSCN_COUNT;
		}

		/*
		 * Re-issue REPORT_LUNS with a buffer sized for everything
		 * the target claims.  On send failure, advance the
		 * discovery accounting here instead.
		 */
		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
		}

		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * Retry budget exhausted: process as many LUNs as the buffer holds
	 * and log how many were lost.
	 */
	if (nluns_claimed > nluns_bufmax) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
		    " Number of LUNs lost=%x",
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7],
		    nluns_claimed - nluns_bufmax);

		nluns_claimed = nluns_bufmax;
	}
	ptgt->tgt_lun_cnt = nluns_claimed;

	/*
	 * Identify missing LUNs and print warning messages
	 */
	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
		int offline;
		int exists = 0;

		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;

		/* Look for this known LUN in the freshly reported list. */
		for (i = 0; i < nluns_claimed && exists == 0; i++) {
			uchar_t *lun_string;

			lun_string = (uchar_t *)&(report_lun->lun_string[i]);

			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
			case FCP_VOLUME_ADDRESSING:
				/* 14-bit LUN number from the first 2 bytes */
				lun_num = ((lun_string[0] & 0x3F) << 8) |
				    lun_string[1];
				if (plun->lun_num == lun_num) {
					exists++;
					break;
				}
				break;

			default:
				break;
			}
		}

		if (!exists && !offline) {
			/*
			 * Known, online LUN no longer reported.  Re-acquire
			 * the locks in port->tgt order before consulting the
			 * global state-change check.
			 */
			mutex_exit(&ptgt->tgt_mutex);

			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
				/*
				 * set disappear flag when device was connected
				 */
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					plun->lun_state |= FCP_LUN_DISAPPEARED;
				}
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				/*
				 * NOTE(review): lun_state is re-tested here
				 * after the locks were dropped; only the log
				 * message depends on it, so a race is benign.
				 */
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x disappeared",
					    plun->lun_num, ptgt->tgt_d_id);
				}
				mutex_enter(&ptgt->tgt_mutex);
			} else {
				/*
				 * Link/target state changed underneath us:
				 * this response is stale, drop everything.
				 */
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_5, 0,
				    "fcp_handle_reportlun,1: state change"
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				kmem_free(report_lun, len);
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
				return;
			}
		} else if (exists) {
			/*
			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
			 * actually exists in REPORT_LUN response
			 */
			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
				plun->lun_state &=
				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			if (offline || plun->lun_num == 0) {
				if (plun->lun_state & FCP_LUN_DISAPPEARED) {
					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
					mutex_exit(&ptgt->tgt_mutex);
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x reappeared",
					    plun->lun_num, ptgt->tgt_d_id);
					mutex_enter(&ptgt->tgt_mutex);
				}
			}
		}
	}

	/* tmp_cnt is the per-target count of LUN probes still outstanding. */
	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);

	/* scan each lun */
	for (i = 0; i < nluns_claimed; i++) {
		uchar_t	*lun_string;

		lun_string = (uchar_t *)&(report_lun->lun_string[i]);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
		    lun_string[0]);

		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
		case FCP_VOLUME_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/* We will skip masked LUNs because of the blacklist. */
			if (fcp_lun_blacklist != NULL) {
				mutex_enter(&ptgt->tgt_mutex);
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					ptgt->tgt_lun_cnt--;
					mutex_exit(&ptgt->tgt_mutex);
					break;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}

			/* see if this LUN is already allocated */
			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun allocation failed"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, lun_num);
					break;
				}
			}

			mutex_enter(&plun->lun_tgt->tgt_mutex);
			/* convert to LUN */
			plun->lun_addr.ent_addr_0 =
			    BE_16(*(uint16_t *)&(lun_string[0]));
			plun->lun_addr.ent_addr_1 =
			    BE_16(*(uint16_t *)&(lun_string[2]));
			plun->lun_addr.ent_addr_2 =
			    BE_16(*(uint16_t *)&(lun_string[4]));
			plun->lun_addr.ent_addr_3 =
			    BE_16(*(uint16_t *)&(lun_string[6]));

			plun->lun_num = lun_num;
			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
				    ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/*
			 * Probe the LUN with standard INQUIRY.  On success,
			 * the accounting decrement at the bottom of the loop
			 * is skipped -- the INQUIRY callback owns it.
			 */
			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_tgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!failed to send INQUIRY"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, plun->lun_num);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_handle_reportlun,2: state"
					    " change occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&plun->lun_tgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
			} else {
				continue;
			}
			break;

		default:
			fcp_log(CE_WARN, NULL,
			    "!Unsupported LUN Addressing method %x "
			    "in response to REPORT_LUN", lun_string[0]);
			break;
		}

		/*
		 * each time through this loop we should decrement
		 * the tmp_cnt by one -- since we go through this loop
		 * one time for each LUN, the tmp_cnt should never be <=0
		 */
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	/* Target reported zero LUNs: account for the single placeholder. */
	if (i == 0) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	kmem_free(report_lun, len);
	fcp_icmd_free(pptr, icmd);
}
7910 7910
7911 7911
7912 7912 /*
7913 7913 * called internally to return a LUN given a target and a LUN number
7914 7914 */
7915 7915 static struct fcp_lun *
7916 7916 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7917 7917 {
7918 7918 struct fcp_lun *plun;
7919 7919
7920 7920 mutex_enter(&ptgt->tgt_mutex);
7921 7921 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7922 7922 if (plun->lun_num == lun_num) {
7923 7923 mutex_exit(&ptgt->tgt_mutex);
7924 7924 return (plun);
7925 7925 }
7926 7926 }
7927 7927 mutex_exit(&ptgt->tgt_mutex);
7928 7928
7929 7929 return (NULL);
7930 7930 }
7931 7931
7932 7932
7933 7933 /*
7934 7934 * handle finishing one target for fcp_finish_init
7935 7935 *
7936 7936 * return true (non-zero) if we want finish_init to continue with the
7937 7937 * next target
7938 7938 *
7939 7939 * called with the port mutex held
7940 7940 */
/*ARGSUSED*/
static int
fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int cause)
{
	/* 1 = caller should continue with the next target, 0 = stop. */
	int	rval = 1;
	ASSERT(pptr != NULL);
	ASSERT(ptgt != NULL);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
	    ptgt->tgt_state);

	ASSERT(mutex_owned(&pptr->port_mutex));

	if ((pptr->port_link_cnt != link_cnt) ||
	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
		/*
		 * oh oh -- another link reset or target change
		 * must have occurred while we are in here
		 */
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);

		return (0);
	} else {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
	}

	mutex_enter(&ptgt->tgt_mutex);

	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
		/*
		 * tgt is not offline -- is it marked (i.e. needs
		 * to be offlined) ??
		 */
		if (ptgt->tgt_state & FCP_TGT_MARK) {
			/*
			 * this target not offline *and*
			 * marked
			 */
			ptgt->tgt_state &= ~FCP_TGT_MARK;
			rval = fcp_offline_target(pptr, ptgt, link_cnt,
			    tgt_cnt, 0, 0);
		} else {
			ptgt->tgt_state &= ~FCP_TGT_BUSY;

			/* create the LUNs */
			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
				    cause);
				ptgt->tgt_device_created = 1;
			} else {
				/*
				 * On-demand node: do not create devices now;
				 * just clear the busy state on the LUNs.
				 */
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_BUSY);
			}
		}
	}

	mutex_exit(&ptgt->tgt_mutex);

	return (rval);
}
8005 8005
8006 8006
8007 8007 /*
8008 8008 * this routine is called to finish port initialization
8009 8009 *
8010 8010 * Each port has a "temp" counter -- when a state change happens (e.g.
8011 8011 * port online), the temp count is set to the number of devices in the map.
8012 8012 * Then, as each device gets "discovered", the temp counter is decremented
8013 8013 * by one. When this count reaches zero we know that all of the devices
8014 8014 * in the map have been discovered (or an error has occurred), so we can
8015 8015 * then finish initialization -- which is done by this routine (well, this
8016 8016  * and fcp_finish_tgt())
8017 8017 *
8018 8018 * acquires and releases the global mutex
8019 8019 *
8020 8020 * called with the port mutex owned
8021 8021 */
8022 8022 static void
8023 8023 fcp_finish_init(struct fcp_port *pptr)
8024 8024 {
8025 8025 #ifdef DEBUG
8026 8026 bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
8027 8027 pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
8028 8028 FCP_STACK_DEPTH);
8029 8029 #endif /* DEBUG */
8030 8030
8031 8031 ASSERT(mutex_owned(&pptr->port_mutex));
8032 8032
8033 8033 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8034 8034 fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
8035 8035 " entering; ipkt count=%d", pptr->port_ipkt_cnt);
8036 8036
8037 8037 if ((pptr->port_state & FCP_STATE_ONLINING) &&
8038 8038 !(pptr->port_state & (FCP_STATE_SUSPENDED |
8039 8039 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
8040 8040 pptr->port_state &= ~FCP_STATE_ONLINING;
8041 8041 pptr->port_state |= FCP_STATE_ONLINE;
8042 8042 }
8043 8043
8044 8044 /* Wake up threads waiting on config done */
8045 8045 cv_broadcast(&pptr->port_config_cv);
8046 8046 }
8047 8047
8048 8048
8049 8049 /*
8050 8050 * called from fcp_finish_init to create the LUNs for a target
8051 8051 *
8052 8052 * called with the port mutex owned
8053 8053 */
static void
fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
{
	struct fcp_lun	*plun;
	struct fcp_port	*pptr;
	/*
	 * cip stays NULL for the whole routine; fcp_pass_to_hp presumably
	 * locates or creates the child node itself when given NULL --
	 * verify against fcp_pass_to_hp.
	 */
	child_info_t	*cip = NULL;

	ASSERT(ptgt != NULL);
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	pptr = ptgt->tgt_port;

	ASSERT(pptr != NULL);

	/* scan all LUNs for this target */
	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			continue;
		}

		/* Marked LUNs were not rediscovered: offline them now. */
		if (plun->lun_state & FCP_LUN_MARK) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: offlining marked LUN!");
			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
			continue;
		}

		plun->lun_state &= ~FCP_LUN_BUSY;

		/*
		 * There are conditions in which FCP_LUN_INIT flag is cleared
		 * but we have a valid plun->lun_cip. To cover this case also
		 * CLEAR_BUSY whenever we have a valid lun_cip.
		 */
		if (plun->lun_mpxio && plun->lun_cip &&
		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0))) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: enable lun %p failed!",
			    plun);
		}

		/* Already initialized and unchanged: nothing to create. */
		if (plun->lun_state & FCP_LUN_INIT &&
		    !(plun->lun_state & FCP_LUN_CHANGED)) {
			continue;
		}

		if (cause == FCP_CAUSE_USER_CREATE) {
			continue;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "create_luns: passing ONLINE elem to HP thread");

		/*
		 * If lun has changed, prepare for offlining the old path.
		 * Do not offline the old path right now, since it may be
		 * still opened.
		 */
		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
		}

		/* pass an ONLINE element to the hotplug thread */
		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {

			/*
			 * We can not synchronous attach (i.e pass
			 * NDI_ONLINE_ATTACH) here as we might be
			 * coming from an interrupt or callback
			 * thread.
			 */
			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
			    link_cnt, tgt_cnt, 0, 0)) {
				fcp_log(CE_CONT, pptr->port_dip,
				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
				    plun->lun_tgt->tgt_d_id, plun->lun_num);
			}
		}
	}
}
8140 8140
8141 8141
8142 8142 /*
8143 8143 * function to online/offline devices
8144 8144 */
8145 8145 static int
8146 8146 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8147 8147 int online, int lcount, int tcount, int flags)
8148 8148 {
8149 8149 int rval = NDI_FAILURE;
8150 8150 int circ;
8151 8151 child_info_t *ccip;
8152 8152 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8153 8153 int is_mpxio = pptr->port_mpxio;
8154 8154 dev_info_t *cdip, *pdip;
8155 8155 char *devname;
8156 8156
8157 8157 if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8158 8158 /*
8159 8159 * When this event gets serviced, lun_cip and lun_mpxio
8160 8160 * has changed, so it should be invalidated now.
8161 8161 */
8162 8162 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8163 8163 FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8164 8164 "plun: %p, cip: %p, what:%d", plun, cip, online);
8165 8165 return (rval);
8166 8166 }
8167 8167
8168 8168 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8169 8169 fcp_trace, FCP_BUF_LEVEL_2, 0,
8170 8170 "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8171 8171 "flags=%x mpxio=%x\n",
8172 8172 plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8173 8173 plun->lun_mpxio);
8174 8174
8175 8175 /*
8176 8176 * lun_mpxio needs checking here because we can end up in a race
8177 8177 * condition where this task has been dispatched while lun_mpxio is
8178 8178 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8179 8179 * enable MPXIO for the LUN, but was unable to, and hence cleared
8180 8180 * the flag. We rely on the serialization of the tasks here. We return
8181 8181 * NDI_SUCCESS so any callers continue without reporting spurious
8182 8182 * errors, and the still think we're an MPXIO LUN.
8183 8183 */
8184 8184
8185 8185 if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8186 8186 online == FCP_MPXIO_PATH_SET_BUSY) {
8187 8187 if (plun->lun_mpxio) {
8188 8188 rval = fcp_update_mpxio_path(plun, cip, online);
8189 8189 } else {
8190 8190 rval = NDI_SUCCESS;
8191 8191 }
8192 8192 return (rval);
8193 8193 }
8194 8194
8195 8195 /*
8196 8196 * Explicit devfs_clean() due to ndi_devi_offline() not
8197 8197 * executing devfs_clean() if parent lock is held.
8198 8198 */
8199 8199 ASSERT(!servicing_interrupt());
8200 8200 if (online == FCP_OFFLINE) {
8201 8201 if (plun->lun_mpxio == 0) {
8202 8202 if (plun->lun_cip == cip) {
8203 8203 cdip = DIP(plun->lun_cip);
8204 8204 } else {
8205 8205 cdip = DIP(cip);
8206 8206 }
8207 8207 } else if ((plun->lun_cip == cip) && plun->lun_cip) {
8208 8208 cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8209 8209 } else if ((plun->lun_cip != cip) && cip) {
8210 8210 /*
8211 8211 * This means a DTYPE/GUID change, we shall get the
8212 8212 * dip of the old cip instead of the current lun_cip.
8213 8213 */
8214 8214 cdip = mdi_pi_get_client(PIP(cip));
8215 8215 }
8216 8216 if (cdip) {
8217 8217 if (i_ddi_devi_attached(cdip)) {
8218 8218 pdip = ddi_get_parent(cdip);
8219 8219 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8220 8220 ndi_devi_enter(pdip, &circ);
8221 8221 (void) ddi_deviname(cdip, devname);
8222 8222 ndi_devi_exit(pdip, circ);
8223 8223 /*
8224 8224 * Release parent lock before calling
8225 8225 * devfs_clean().
8226 8226 */
8227 8227 rval = devfs_clean(pdip, devname + 1,
8228 8228 DV_CLEAN_FORCE);
8229 8229 kmem_free(devname, MAXNAMELEN + 1);
8230 8230 /*
8231 8231 * Return if devfs_clean() fails for
8232 8232 * non-MPXIO case.
8233 8233 * For MPXIO case, another path could be
8234 8234 * offlined.
8235 8235 */
8236 8236 if (rval && plun->lun_mpxio == 0) {
8237 8237 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8238 8238 fcp_trace, FCP_BUF_LEVEL_3, 0,
8239 8239 "fcp_trigger_lun: devfs_clean "
8240 8240 "failed rval=%x dip=%p",
8241 8241 rval, pdip);
8242 8242 return (NDI_FAILURE);
8243 8243 }
8244 8244 }
8245 8245 }
8246 8246 }
8247 8247
8248 8248 if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8249 8249 return (NDI_FAILURE);
8250 8250 }
8251 8251
8252 8252 if (is_mpxio) {
8253 8253 mdi_devi_enter(pptr->port_dip, &circ);
8254 8254 } else {
8255 8255 ndi_devi_enter(pptr->port_dip, &circ);
8256 8256 }
8257 8257
8258 8258 mutex_enter(&pptr->port_mutex);
8259 8259 mutex_enter(&plun->lun_mutex);
8260 8260
8261 8261 if (online == FCP_ONLINE) {
8262 8262 ccip = fcp_get_cip(plun, cip, lcount, tcount);
8263 8263 if (ccip == NULL) {
8264 8264 goto fail;
8265 8265 }
8266 8266 } else {
8267 8267 if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8268 8268 goto fail;
8269 8269 }
8270 8270 ccip = cip;
8271 8271 }
8272 8272
8273 8273 if (online == FCP_ONLINE) {
8274 8274 rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8275 8275 &circ);
8276 8276 fc_ulp_log_device_event(pptr->port_fp_handle,
8277 8277 FC_ULP_DEVICE_ONLINE);
8278 8278 } else {
8279 8279 rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8280 8280 &circ);
8281 8281 fc_ulp_log_device_event(pptr->port_fp_handle,
8282 8282 FC_ULP_DEVICE_OFFLINE);
8283 8283 }
8284 8284
8285 8285 fail: mutex_exit(&plun->lun_mutex);
8286 8286 mutex_exit(&pptr->port_mutex);
8287 8287
8288 8288 if (is_mpxio) {
8289 8289 mdi_devi_exit(pptr->port_dip, circ);
8290 8290 } else {
8291 8291 ndi_devi_exit(pptr->port_dip, circ);
8292 8292 }
8293 8293
8294 8294 fc_ulp_idle_port(pptr->port_fp_handle);
8295 8295
8296 8296 return (rval);
8297 8297 }
8298 8298
8299 8299
8300 8300 /*
8301 8301 * take a target offline by taking all of its LUNs offline
8302 8302 */
/*ARGSUSED*/
static int
fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int nowait, int flags)
{
	struct fcp_tgt_elem	*elem;

	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));

	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
	    ptgt->tgt_change_cnt)) {
		/*
		 * Counts are stale: another link reset / target change has
		 * happened.  Tell the caller to stop (return 0).
		 * The trace macro is invoked with tgt_mutex dropped --
		 * presumably a lock-ordering requirement of FCP_TGT_TRACE;
		 * verify against its definition.
		 */
		mutex_exit(&ptgt->tgt_mutex);
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
		mutex_enter(&ptgt->tgt_mutex);

		return (0);
	}

	ptgt->tgt_pd_handle = NULL;
	mutex_exit(&ptgt->tgt_mutex);
	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
	mutex_enter(&ptgt->tgt_mutex);

	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;

	/*
	 * If the target supports deferral (tgt_tcap set -- semantics
	 * defined elsewhere in this file) and an element can be allocated,
	 * queue the offline for the watchdog; nowait == 0 adds the
	 * configured offline delay.  Otherwise offline immediately.
	 */
	if (ptgt->tgt_tcap &&
	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
		elem->flags = flags;
		elem->time = fcp_watchdog_time;
		if (nowait == 0) {
			elem->time += fcp_offline_delay;
		}
		elem->ptgt = ptgt;
		elem->link_cnt = link_cnt;
		elem->tgt_cnt = tgt_cnt;
		elem->next = pptr->port_offline_tgts;
		pptr->port_offline_tgts = elem;
	} else {
		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
	}

	/* 1 = caller may continue with the next target. */
	return (1);
}
8349 8349
8350 8350
/*
 * Immediately transition a target to the offline state and offline all of
 * its LUNs.  Called with both the port mutex and the target mutex held.
 */
static void
fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int flags)
{
	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	/* Let fp log this remote port back in if it reappears. */
	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
	/* Plain assignment: wipes all other tgt_state bits. */
	ptgt->tgt_state = FCP_TGT_OFFLINE;
	ptgt->tgt_pd_handle = NULL;
	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
}
8363 8363
8364 8364
8365 8365 static void
8366 8366 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8367 8367 int flags)
8368 8368 {
8369 8369 struct fcp_lun *plun;
8370 8370
8371 8371 ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8372 8372 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8373 8373
8374 8374 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8375 8375 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8376 8376 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8377 8377 }
8378 8378 }
8379 8379 }
8380 8380
8381 8381
8382 8382 /*
8383 8383 * take a LUN offline
8384 8384 *
8385 8385 * enters and leaves with the target mutex held, releasing it in the process
8386 8386 *
8387 8387 * allocates memory in non-sleep mode
8388 8388 */
8389 8389 static void
8390 8390 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8391 8391 int nowait, int flags)
8392 8392 {
8393 8393 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8394 8394 struct fcp_lun_elem *elem;
8395 8395
8396 8396 ASSERT(plun != NULL);
8397 8397 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8398 8398
8399 8399 if (nowait) {
8400 8400 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8401 8401 return;
8402 8402 }
8403 8403
8404 8404 if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8405 8405 elem->flags = flags;
8406 8406 elem->time = fcp_watchdog_time;
8407 8407 if (nowait == 0) {
8408 8408 elem->time += fcp_offline_delay;
8409 8409 }
8410 8410 elem->plun = plun;
8411 8411 elem->link_cnt = link_cnt;
8412 8412 elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8413 8413 elem->next = pptr->port_offline_luns;
8414 8414 pptr->port_offline_luns = elem;
8415 8415 } else {
8416 8416 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8417 8417 }
8418 8418 }
8419 8419
8420 8420
/*
 * Prepare a LUN for going offline: pull its queued (not-yet-issued)
 * commands off the port queue and complete them, then — if the LUN is
 * under MPxIO — clear the path-busy state and disable the path.
 *
 * Entered and exited with the target mutex held; it is dropped and
 * reacquired internally around the command scan/abort and around the
 * mdi_pi_disable_path() call.
 */
static void
fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	struct fcp_pkt *head = NULL;

	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	/*
	 * Drop the target mutex: fcp_scan_commands() takes the port
	 * packet mutex and fcp_abort_commands() expects the port mutex.
	 */
	mutex_exit(&LUN_TGT->tgt_mutex);

	head = fcp_scan_commands(plun);
	if (head != NULL) {
		fcp_abort_commands(head, LUN_PORT);
	}

	mutex_enter(&LUN_TGT->tgt_mutex);

	if (plun->lun_cip && plun->lun_mpxio) {
		/*
		 * Intimate MPxIO lun busy is cleared
		 */
		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0)) {
			fcp_log(CE_NOTE, LUN_PORT->port_dip,
			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
		/*
		 * Intimate MPxIO that the lun is now marked for offline
		 * (drop tgt_mutex across the mdi call to respect lock
		 * ordering).
		 */
		mutex_exit(&LUN_TGT->tgt_mutex);
		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
		mutex_enter(&LUN_TGT->tgt_mutex);
	}
}
8456 8456
/*
 * Take a LUN offline immediately: set the offline state flags, complete
 * any queued commands, notify MPxIO, and finally hand an FCP_OFFLINE
 * work element to the hotplug thread for device-tree teardown.
 *
 * Entered and exited with the target mutex held; it is dropped and
 * reacquired internally.
 */
static void
fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
    int flags)
{
	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	/* fcp_update_offline_flags() acquires the target mutex itself. */
	mutex_exit(&LUN_TGT->tgt_mutex);
	fcp_update_offline_flags(plun);
	mutex_enter(&LUN_TGT->tgt_mutex);

	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);

	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_4, 0,
	    "offline_lun: passing OFFLINE elem to HP thread");

	/* Only LUNs with a child/path node need the hotplug thread. */
	if (plun->lun_cip) {
		fcp_log(CE_NOTE, LUN_PORT->port_dip,
		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
		    LUN_TGT->tgt_trace);

		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
		    link_cnt, tgt_cnt, flags, 0)) {
			fcp_log(CE_CONT, LUN_PORT->port_dip,
			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
	}
}
8487 8487
8488 8488 static void
8489 8489 fcp_scan_offline_luns(struct fcp_port *pptr)
8490 8490 {
8491 8491 struct fcp_lun_elem *elem;
8492 8492 struct fcp_lun_elem *prev;
8493 8493 struct fcp_lun_elem *next;
8494 8494
8495 8495 ASSERT(MUTEX_HELD(&pptr->port_mutex));
8496 8496
8497 8497 prev = NULL;
8498 8498 elem = pptr->port_offline_luns;
8499 8499 while (elem) {
8500 8500 next = elem->next;
8501 8501 if (elem->time <= fcp_watchdog_time) {
8502 8502 int changed = 1;
8503 8503 struct fcp_tgt *ptgt = elem->plun->lun_tgt;
8504 8504
8505 8505 mutex_enter(&ptgt->tgt_mutex);
8506 8506 if (pptr->port_link_cnt == elem->link_cnt &&
8507 8507 ptgt->tgt_change_cnt == elem->tgt_cnt) {
8508 8508 changed = 0;
8509 8509 }
8510 8510
8511 8511 if (!changed &&
8512 8512 !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
8513 8513 fcp_offline_lun_now(elem->plun,
8514 8514 elem->link_cnt, elem->tgt_cnt, elem->flags);
8515 8515 }
8516 8516 mutex_exit(&ptgt->tgt_mutex);
8517 8517
8518 8518 kmem_free(elem, sizeof (*elem));
8519 8519
8520 8520 if (prev) {
8521 8521 prev->next = next;
8522 8522 } else {
8523 8523 pptr->port_offline_luns = next;
8524 8524 }
8525 8525 } else {
8526 8526 prev = elem;
8527 8527 }
8528 8528 elem = next;
8529 8529 }
8530 8530 }
8531 8531
8532 8532
/*
 * Walk the port's deferred offline-target list and, for every element
 * whose deadline has been reached, offline the target now — unless the
 * element is outdated (the target changed in a way other than the one
 * tolerated case below), in which case it is discarded.
 *
 * Expired elements are unlinked and freed in either case.
 *
 * Caller must hold the port mutex.
 */
static void
fcp_scan_offline_tgts(struct fcp_port *pptr)
{
	struct fcp_tgt_elem *elem;
	struct fcp_tgt_elem *prev;
	struct fcp_tgt_elem *next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_tgts;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int outdated = 1;
			struct fcp_tgt *ptgt = elem->ptgt;

			mutex_enter(&ptgt->tgt_mutex);

			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
				/* No change on tgt since elem was created. */
				outdated = 0;
			} else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
			    pptr->port_link_cnt == elem->link_cnt + 1 &&
			    ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
				/*
				 * Exactly one thing happened to the target
				 * inbetween: the local port went offline.
				 * For fp the remote port is already gone so
				 * it will not tell us again to offline the
				 * target. We must offline it now.
				 */
				outdated = 0;
			}

			if (!outdated && !(ptgt->tgt_state &
			    FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr,
				    ptgt, elem->link_cnt, elem->tgt_cnt,
				    elem->flags);
			}

			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* Unlink the consumed element from the list. */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_tgts = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
8590 8590
8591 8591
/*
 * Mark a LUN offline in its state word and, if a child device node was
 * attached for it, fire the FCAL_REMOVE_EVENT NDI callbacks so consumers
 * learn the device is going away.
 *
 * Acquires and releases the target mutex and the lun mutex itself; note
 * the asymmetric exit paths below — on the child-node path tgt_mutex is
 * dropped before the cookie lookup and lun_mutex after it, while the
 * else-path drops both immediately.
 */
static void
fcp_update_offline_flags(struct fcp_lun *plun)
{
	struct fcp_port *pptr = LUN_PORT;
	ASSERT(plun != NULL);

	mutex_enter(&LUN_TGT->tgt_mutex);
	plun->lun_state |= FCP_LUN_OFFLINE;
	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
		dev_info_t *cdip = NULL;

		mutex_exit(&LUN_TGT->tgt_mutex);

		/*
		 * Resolve the child devinfo node: direct for non-MPxIO
		 * LUNs, via the path-info client for MPxIO LUNs.
		 */
		if (plun->lun_mpxio == 0) {
			cdip = DIP(plun->lun_cip);
		} else if (plun->lun_cip) {
			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
		}

		mutex_exit(&plun->lun_mutex);
		if (cdip) {
			/* Deliver the removal event to this node only. */
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
			    &fcp_remove_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(
			    pptr->port_ndi_event_hdl, cdip,
			    fcp_remove_eid, NULL);
		}
	} else {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&LUN_TGT->tgt_mutex);
	}
}
8628 8628
8629 8629
/*
 * Scan all of the command pkts for this port, moving pkts that
 * match our LUN onto our own list (headed by "head").
 *
 * Skips commands for other LUNs, commands already issued to the FCA
 * (FCP_PKT_ISSUED), and polled (FLAG_NOINTR) commands.  Matching
 * commands are unlinked from the port queue under port_pkt_mutex and
 * returned as a singly-linked list for the caller to complete.
 *
 * Returns the head of the removed-command list, or NULL if none matched.
 */
static struct fcp_pkt *
fcp_scan_commands(struct fcp_lun *plun)
{
	struct fcp_port *pptr = LUN_PORT;

	struct fcp_pkt *cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt *ncmd = NULL;	/* next pkt ptr */
	struct fcp_pkt *pcmd = NULL;	/* the previous command */

	struct fcp_pkt *head = NULL;	/* head of our list */
	struct fcp_pkt *tail = NULL;	/* tail of our list */

	int cmds_found = 0;

	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *tlun =
		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);

		ncmd = cmd->cmd_next;	/* set next command */

		/*
		 * if this pkt is for a different LUN or the
		 * command is sent down, skip it.
		 */
		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
			pcmd = cmd;
			continue;
		}
		cmds_found++;
		/* Unlink cmd from the port queue; pcmd is its predecessor. */
		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = cmd->cmd_next;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = cmd->cmd_next;
		}

		/* Removing the tail: the predecessor becomes the new tail. */
		if (cmd == pptr->port_pkt_tail) {
			pptr->port_pkt_tail = pcmd;
			if (pcmd) {
				pcmd->cmd_next = NULL;
			}
		}

		/* Append cmd to the private list being built. */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);

			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "scan commands: %d cmd(s) found", cmds_found);

	return (head);
}
8698 8698
8699 8699
/*
 * "Abort" all the commands on the given list: each packet is completed
 * with pkt_reason = CMD_DEV_GONE (rather than being sent an FC abort),
 * telling the target driver not to retry because the device is about to
 * be offlined.  The list is typically built by fcp_scan_commands().
 *
 * Caller must hold the port mutex; it is dropped and reacquired around
 * each completion callback.
 */
static void
fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
{
	struct fcp_pkt *cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt *ncmd = NULL;	/* next pkt ptr */

	ASSERT(mutex_owned(&pptr->port_mutex));

	/* scan through the pkts and invalid them */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		/*
		 * The lun is going to be marked offline. Indicate
		 * the target driver not to requeue or retry this command
		 * as the device is going to be offlined pretty soon.
		 */
		pkt->pkt_reason = CMD_DEV_GONE;
		pkt->pkt_statistics = 0;
		pkt->pkt_state = 0;

		/* reset cmd flags/state */
		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
		cmd->cmd_state = FCP_PKT_IDLE;

		/*
		 * ensure we have a packet completion routine,
		 * then call it.
		 */
		ASSERT(pkt->pkt_comp != NULL);

		/* Drop the port mutex across the callback into the target. */
		mutex_exit(&pptr->port_mutex);
		fcp_post_callback(cmd);
		mutex_enter(&pptr->port_mutex);
	}
}
8742 8742
8743 8743
/*
 * the pkt_comp callback for command packets
 *
 * Translates the FC packet completion into SCSI packet state via
 * fcp_complete_pkt() and then invokes the target driver's completion
 * routine through fcp_post_callback().
 */
static void
fcp_cmd_callback(fc_packet_t *fpkt)
{
	struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);

	/* DEBUG kernels trip the ASSERT; production panics explicitly. */
	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);

	if (cmd->cmd_state == FCP_PKT_IDLE) {
		cmn_err(CE_PANIC, "Packet already completed %p",
		    (void *)cmd);
	}

	/*
	 * Watch thread should be freeing the packet, ignore the pkt.
	 */
	if (cmd->cmd_state == FCP_PKT_ABORTING) {
		fcp_log(CE_CONT, pptr->port_dip,
		    "!FCP: Pkt completed while aborting\n");
		return;
	}
	cmd->cmd_state = FCP_PKT_IDLE;

	fcp_complete_pkt(fpkt);

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	fcp_post_callback(cmd);
}
8781 8781
8782 8782
/*
 * Translate an FC packet completion into scsi_pkt state/reason/statistics
 * for the target driver.
 *
 * On FC_PKT_SUCCESS: copies in the FCP response, sets the transport
 * state bits, propagates SCSI status, handles FCP protocol errors
 * (rsp_len_set) and autosense data (sense_len_set), and kicks off LUN
 * rediscovery when the sense indicates the LUN inventory changed.
 *
 * On any other FC state: maps the FC state/reason pair onto the closest
 * CMD_* pkt_reason and STAT_* statistics the target drivers understand.
 */
static void
fcp_complete_pkt(fc_packet_t *fpkt)
{
	int error = 0;
	struct fcp_pkt *cmd = (struct fcp_pkt *)
	    fpkt->pkt_ulp_private;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun *plun;
	struct fcp_tgt *ptgt;
	struct fcp_rsp *rsp;
	struct scsi_address save;

#ifdef	DEBUG
	/* Saved to assert the address is untouched at function exit. */
	save = pkt->pkt_address;
#endif /* DEBUG */

	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;

	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));
		}

		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS;

		pkt->pkt_resid = 0;

		if (fpkt->pkt_datalen) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
			/* Short data transfer: force the slow path below. */
			if (fpkt->pkt_data_resid) {
				error++;
			}
		}

		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
			/*
			 * The next two checks make sure that if there
			 * is no sense data or a valid response and
			 * the command came back with check condition,
			 * the command should be retried.
			 */
			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
			    !rsp->fcp_u.fcp_status.sense_len_set) {
				pkt->pkt_state &= ~STATE_XFERRED_DATA;
				pkt->pkt_resid = cmd->cmd_dmacount;
			}
		}

		/* Fast path: clean completion, nothing more to decode. */
		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
			return;
		}

		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Update the transfer resid, if appropriate
		 */
		if (rsp->fcp_u.fcp_status.resid_over ||
		    rsp->fcp_u.fcp_status.resid_under) {
			pkt->pkt_resid = rsp->fcp_resid;
		}

		/*
		 * First see if we got a FCP protocol error.
		 */
		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			struct fcp_rsp_info *bep;
			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
			    sizeof (struct fcp_rsp));

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len,
				    rsp->fcp_sense_len);

				return;
			}

			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp), bep,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp_info));
			}

			if (bep->rsp_code != FCP_NO_FAILURE) {
				child_info_t *cip;

				pkt->pkt_reason = CMD_TRAN_ERR;

				mutex_enter(&plun->lun_mutex);
				cip = plun->lun_cip;
				mutex_exit(&plun->lun_mutex);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "FCP response error on cmd=%p"
				    " target=0x%x, cip=%p", cmd,
				    ptgt->tgt_d_id, cip);
			}
		}

		/*
		 * See if we got a SCSI error with sense data
		 */
		if (rsp->fcp_u.fcp_status.sense_len_set) {
			uchar_t rqlen;
			caddr_t sense_from;
			child_info_t *cip;
			timeout_id_t tid;
			struct scsi_arq_status *arq;
			struct scsi_extended_sense *sense_to;

			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
			sense_to = &arq->sts_sensedata;

			/* Never copy more than the extended-sense size. */
			rqlen = (uchar_t)min(rsp->fcp_sense_len,
			    sizeof (struct scsi_extended_sense));

			/* Sense data follows the rsp and rsp-info area. */
			sense_from = (caddr_t)fpkt->pkt_resp +
			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len,
				    rsp->fcp_sense_len);

				return;
			}

			/*
			 * copy in sense information
			 */
			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(sense_from, sense_to,
				    fpkt->pkt_resp_acc, rqlen);
			} else {
				bcopy(sense_from, sense_to, rqlen);
			}

			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Kick off rediscovery
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
				}
				mutex_exit(&ptgt->tgt_mutex);
				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: Report Lun Has Changed"
					    " target=%x", ptgt->tgt_d_id);
				} else if (FCP_SENSE_NO_LUN(sense_to)) {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: LU Not Supported"
					    " target=%x", ptgt->tgt_d_id);
				}
			}
			ASSERT(pkt->pkt_scbp != NULL);

			/* Fill in the auto-request-sense status block. */
			pkt->pkt_state |= STATE_ARQ_DONE;

			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;

			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
			arq->sts_rqpkt_reason = 0;
			arq->sts_rqpkt_statistics = 0;

			arq->sts_rqpkt_state = STATE_GOT_BUS |
			    STATE_GOT_TARGET | STATE_SENT_CMD |
			    STATE_GOT_STATUS | STATE_ARQ_DONE |
			    STATE_XFERRED_DATA;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "SCSI Check condition on cmd=%p target=0x%x"
			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
			    cmd->cmd_fcp_cmd.fcp_cdb[0],
			    rsp->fcp_u.fcp_status.scsi_status,
			    sense_to->es_key, sense_to->es_add_code,
			    sense_to->es_qual_code);
		}
	} else {
		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Work harder to translate errors into target driver
		 * understandable ones. Note with despair that the target
		 * drivers don't decode pkt_state and pkt_reason exhaustively
		 * They resort to using the big hammer most often, which
		 * may not get fixed in the life time of this driver.
		 */
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;

		switch (fpkt->pkt_state) {
		case FC_PKT_TRAN_ERROR:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OVERRUN:
				pkt->pkt_reason = CMD_CMD_OVR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_XCHG_BSY: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */

				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_ABORTED:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_ABORT_FAILED:
				pkt->pkt_reason = CMD_ABORT_FAIL;
				break;

			case FC_REASON_NO_SEQ_INIT:
			case FC_REASON_CRC_ERROR:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				break;
			}
			break;

		case FC_PKT_PORT_OFFLINE: {
			dev_info_t *cdip = NULL;
			caddr_t ptr;

			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_8, 0,
				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
				    ptgt->tgt_d_id);
			}

			/* Fire FCAL_REMOVE_EVENT on the child node. */
			mutex_enter(&plun->lun_mutex);
			if (plun->lun_mpxio == 0) {
				cdip = DIP(plun->lun_cip);
			} else if (plun->lun_cip) {
				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
			}

			mutex_exit(&plun->lun_mutex);

			if (cdip) {
				(void) ndi_event_retrieve_cookie(
				    pptr->port_ndi_event_hdl, cdip,
				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
				    NDI_EVENT_NOPASS);
				(void) ndi_event_run_callbacks(
				    pptr->port_ndi_event_hdl, cdip,
				    fcp_remove_eid, NULL);
			}

			/*
			 * If the link goes off-line for a lip,
			 * this will cause a error to the ST SG
			 * SGEN drivers. By setting BUSY we will
			 * give the drivers the chance to retry
			 * before it blows of the job. ST will
			 * remember how many times it has retried.
			 */

			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
			    (plun->lun_type == DTYPE_CHANGER)) {
				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
			} else {
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;
			}
			break;
		}

		case FC_PKT_TRAN_BSY:
			/*
			 * Use the ssd Qfull handling here.
			 */
			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
			pkt->pkt_state = STATE_GOT_BUS;
			break;

		case FC_PKT_TIMEOUT:
			pkt->pkt_reason = CMD_TIMEOUT;
			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
				pkt->pkt_statistics |= STAT_TIMEOUT;
			} else {
				pkt->pkt_statistics |= STAT_ABORTED;
			}
			break;

		case FC_PKT_LOCAL_RJT:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OFFLINE: {
				dev_info_t *cdip = NULL;

				mutex_enter(&plun->lun_mutex);
				if (plun->lun_mpxio == 0) {
					cdip = DIP(plun->lun_cip);
				} else if (plun->lun_cip) {
					cdip = mdi_pi_get_client(
					    PIP(plun->lun_cip));
				}
				mutex_exit(&plun->lun_mutex);

				if (cdip) {
					(void) ndi_event_retrieve_cookie(
					    pptr->port_ndi_event_hdl, cdip,
					    FCAL_REMOVE_EVENT,
					    &fcp_remove_eid,
					    NDI_EVENT_NOPASS);
					(void) ndi_event_run_callbacks(
					    pptr->port_ndi_event_hdl,
					    cdip, fcp_remove_eid, NULL);
				}

				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;

				break;
			}

			case FC_REASON_NOMEM:
			case FC_REASON_QFULL: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_DMA_ERROR:
				pkt->pkt_reason = CMD_DMA_DERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_CRC_ERROR:
			case FC_REASON_UNDERRUN: {
				uchar_t status;
				/*
				 * Work around for Bugid: 4240945.
				 * IB on A5k doesn't set the Underrun bit
				 * in the fcp status, when it is transferring
				 * less than requested amount of data. Work
				 * around the ses problem to keep luxadm
				 * happy till ibfirmware is fixed.
				 */
				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
					FCP_CP_IN(fpkt->pkt_resp, rsp,
					    fpkt->pkt_resp_acc,
					    sizeof (struct fcp_rsp));
				}
				status = rsp->fcp_u.fcp_status.scsi_status;
				if (((plun->lun_type & DTYPE_MASK) ==
				    DTYPE_ESI) && (status == STATUS_GOOD)) {
					pkt->pkt_reason = CMD_CMPLT;
					*pkt->pkt_scbp = status;
					pkt->pkt_resid = 0;
				} else {
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics |= STAT_ABORTED;
				}
				break;
			}

			case FC_REASON_NO_CONNECTION:
			case FC_REASON_UNSUPPORTED:
			case FC_REASON_ILLEGAL_REQ:
			case FC_REASON_BAD_SID:
			case FC_REASON_DIAG_BUSY:
			case FC_REASON_FCAL_OPN_FAIL:
			case FC_REASON_BAD_XID:
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			}
			break;

		case FC_PKT_NPORT_RJT:
		case FC_PKT_FABRIC_RJT:
		case FC_PKT_NPORT_BSY:
		case FC_PKT_FABRIC_BSY:
		default:
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "FC Status 0x%x, reason 0x%x",
			    fpkt->pkt_state, fpkt->pkt_reason);
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_statistics |= STAT_ABORTED;
			break;
		}

		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
		    fpkt->pkt_reason);
	}

	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
}
9246 9246
9247 9247
9248 9248 static int
9249 9249 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9250 9250 {
9251 9251 if (rsp->reserved_0 || rsp->reserved_1 ||
9252 9252 rsp->fcp_u.fcp_status.reserved_0 ||
9253 9253 rsp->fcp_u.fcp_status.reserved_1) {
9254 9254 /*
9255 9255 * These reserved fields should ideally be zero. FCP-2 does say
9256 9256 * that the recipient need not check for reserved fields to be
9257 9257 * zero. If they are not zero, we will not make a fuss about it
9258 9258 * - just log it (in debug to both trace buffer and messages
9259 9259 * file and to trace buffer only in non-debug) and move on.
9260 9260 *
9261 9261 * Non-zero reserved fields were seen with minnows.
9262 9262 *
9263 9263 * qlc takes care of some of this but we cannot assume that all
9264 9264 * FCAs will do so.
9265 9265 */
9266 9266 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9267 9267 FCP_BUF_LEVEL_5, 0,
9268 9268 "Got fcp response packet with non-zero reserved fields "
9269 9269 "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9270 9270 "status.reserved_0:0x%x, status.reserved_1:0x%x",
9271 9271 rsp->reserved_0, rsp->reserved_1,
9272 9272 rsp->fcp_u.fcp_status.reserved_0,
9273 9273 rsp->fcp_u.fcp_status.reserved_1);
9274 9274 }
9275 9275
9276 9276 if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9277 9277 (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9278 9278 return (FC_FAILURE);
9279 9279 }
9280 9280
9281 9281 if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9282 9282 (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9283 9283 sizeof (struct fcp_rsp))) {
9284 9284 return (FC_FAILURE);
9285 9285 }
9286 9286
9287 9287 return (FC_SUCCESS);
9288 9288 }
9289 9289
9290 9290
/*
 * This is called when there is a change in the device state. The case
 * we're handling here is: if the d_id does not match, offline this tgt
 * and online a new tgt with the new d_id. Called from fcp_handle_devices
 * with port_mutex held.
 *
 * Returns the result of fcp_handle_mapflags(), which performs the
 * (re)online work for the map entry.
 */
static int
fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	ASSERT(mutex_owned(&pptr->port_mutex));

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "Starting fcp_device_changed...");

	/*
	 * The two cases where the port_device_changed is called is
	 * either it changes it's d_id or it's hard address.
	 * (Hard addresses only matter on external-loop topologies.)
	 */
	if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
	    (FC_TOP_EXTERNAL(pptr->port_topology) &&
	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {

		/* offline this target */
		mutex_enter(&ptgt->tgt_mutex);
		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
			/*
			 * NOTE(review): tgt_cnt 0 and nowait 1 are passed
			 * here — presumably 0 means "match any change
			 * count"; confirm against fcp_offline_target().
			 */
			(void) fcp_offline_target(pptr, ptgt, link_cnt,
			    0, 1, NDI_DEVI_REMOVE);
		}
		mutex_exit(&ptgt->tgt_mutex);

		fcp_log(CE_NOTE, pptr->port_dip,
		    "Change in target properties: Old D_ID=%x New D_ID=%x"
		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
		    map_entry->map_hard_addr.hard_addr);
	}

	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
	    link_cnt, tgt_cnt, cause));
}
9333 9333
9334 9334 /*
9335 9335 * Function: fcp_alloc_lun
9336 9336 *
9337 9337 * Description: Creates a new lun structure and adds it to the list
9338 9338 * of luns of the target.
9339 9339 *
9340 9340 * Argument: ptgt Target the lun will belong to.
9341 9341 *
9342 9342 * Return Value: NULL Failed
9343 9343 * Not NULL Succeeded
9344 9344 *
9345 9345 * Context: Kernel context
9346 9346 */
9347 9347 static struct fcp_lun *
9348 9348 fcp_alloc_lun(struct fcp_tgt *ptgt)
9349 9349 {
9350 9350 struct fcp_lun *plun;
9351 9351
9352 9352 plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9353 9353 if (plun != NULL) {
9354 9354 /*
9355 9355 * Initialize the mutex before putting in the target list
9356 9356 * especially before releasing the target mutex.
9357 9357 */
9358 9358 mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9359 9359 plun->lun_tgt = ptgt;
9360 9360
9361 9361 mutex_enter(&ptgt->tgt_mutex);
9362 9362 plun->lun_next = ptgt->tgt_lun;
9363 9363 ptgt->tgt_lun = plun;
9364 9364 plun->lun_old_guid = NULL;
9365 9365 plun->lun_old_guid_size = 0;
9366 9366 mutex_exit(&ptgt->tgt_mutex);
9367 9367 }
9368 9368
9369 9369 return (plun);
9370 9370 }
9371 9371
9372 9372 /*
9373 9373 * Function: fcp_dealloc_lun
9374 9374 *
9375 9375 * Description: Frees the LUN structure passed by the caller.
9376 9376 *
9377 9377 * Argument: plun LUN structure to free.
9378 9378 *
9379 9379 * Return Value: None
9380 9380 *
9381 9381 * Context: Kernel context.
9382 9382 */
9383 9383 static void
9384 9384 fcp_dealloc_lun(struct fcp_lun *plun)
9385 9385 {
9386 9386 mutex_enter(&plun->lun_mutex);
9387 9387 if (plun->lun_cip) {
9388 9388 fcp_remove_child(plun);
9389 9389 }
9390 9390 mutex_exit(&plun->lun_mutex);
9391 9391
9392 9392 mutex_destroy(&plun->lun_mutex);
9393 9393 if (plun->lun_guid) {
9394 9394 kmem_free(plun->lun_guid, plun->lun_guid_size);
9395 9395 }
9396 9396 if (plun->lun_old_guid) {
9397 9397 kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9398 9398 }
9399 9399 kmem_free(plun, sizeof (*plun));
9400 9400 }
9401 9401
9402 9402 /*
9403 9403 * Function: fcp_alloc_tgt
9404 9404 *
9405 9405 * Description: Creates a new target structure and adds it to the port
9406 9406 * hash list.
9407 9407 *
9408 9408 * Argument: pptr fcp port structure
9409 9409 * *map_entry entry describing the target to create
9410 9410 * link_cnt Link state change counter
9411 9411 *
9412 9412 * Return Value: NULL Failed
9413 9413 * Not NULL Succeeded
9414 9414 *
9415 9415 * Context: Kernel context.
9416 9416 */
9417 9417 static struct fcp_tgt *
9418 9418 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9419 9419 {
9420 9420 int hash;
9421 9421 uchar_t *wwn;
9422 9422 struct fcp_tgt *ptgt;
9423 9423
9424 9424 ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9425 9425 if (ptgt != NULL) {
9426 9426 mutex_enter(&pptr->port_mutex);
9427 9427 if (link_cnt != pptr->port_link_cnt) {
9428 9428 /*
9429 9429 * oh oh -- another link reset
9430 9430 * in progress -- give up
9431 9431 */
9432 9432 mutex_exit(&pptr->port_mutex);
9433 9433 kmem_free(ptgt, sizeof (*ptgt));
9434 9434 ptgt = NULL;
9435 9435 } else {
9436 9436 /*
9437 9437 * initialize the mutex before putting in the port
9438 9438 * wwn list, especially before releasing the port
9439 9439 * mutex.
9440 9440 */
9441 9441 mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9442 9442
9443 9443 /* add new target entry to the port's hash list */
9444 9444 wwn = (uchar_t *)&map_entry->map_pwwn;
9445 9445 hash = FCP_HASH(wwn);
9446 9446
9447 9447 ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9448 9448 pptr->port_tgt_hash_table[hash] = ptgt;
9449 9449
9450 9450 /* save cross-ptr */
9451 9451 ptgt->tgt_port = pptr;
9452 9452
9453 9453 ptgt->tgt_change_cnt = 1;
9454 9454
9455 9455 /* initialize the target manual_config_only flag */
9456 9456 if (fcp_enable_auto_configuration) {
9457 9457 ptgt->tgt_manual_config_only = 0;
9458 9458 } else {
9459 9459 ptgt->tgt_manual_config_only = 1;
9460 9460 }
9461 9461
9462 9462 mutex_exit(&pptr->port_mutex);
9463 9463 }
9464 9464 }
9465 9465
9466 9466 return (ptgt);
9467 9467 }
9468 9468
9469 9469 /*
9470 9470 * Function: fcp_dealloc_tgt
9471 9471 *
9472 9472 * Description: Frees the target structure passed by the caller.
9473 9473 *
9474 9474 * Argument: ptgt Target structure to free.
9475 9475 *
9476 9476 * Return Value: None
9477 9477 *
9478 9478 * Context: Kernel context.
9479 9479 */
9480 9480 static void
9481 9481 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9482 9482 {
9483 9483 mutex_destroy(&ptgt->tgt_mutex);
9484 9484 kmem_free(ptgt, sizeof (*ptgt));
9485 9485 }
9486 9486
9487 9487
/*
 * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
 *
 * Device discovery commands will not be retried for-ever as
 * this will have repercussions on other devices that need to
 * be submitted to the hotplug thread. After a quick glance
 * at the SCSI-3 spec, it was found that the spec doesn't
 * mandate a forever retry, rather recommends a delayed retry.
 *
 * Since Photon IB is single threaded, STATUS_BUSY is common
 * in a 4+initiator environment. Make sure the total time
 * spent on retries (including command timeout) does not
 * exceed 60 seconds
 */
static void
fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
		/*
		 * A link/target state change happened while this packet
		 * was in flight: abandon the retry, drop the discovery
		 * reference and free the internal packet.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_queue_ipkt,1:state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	mutex_exit(&ptgt->tgt_mutex);

	/*
	 * Back off a bit further on each successive retry: the packet
	 * becomes eligible for re-submission ipkt_retries seconds
	 * (in watchdog time) from now.
	 */
	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;

	if (pptr->port_ipkt_list != NULL) {
		/* add pkt to front of doubly-linked list */
		pptr->port_ipkt_list->ipkt_prev = icmd;
		icmd->ipkt_next = pptr->port_ipkt_list;
		pptr->port_ipkt_list = icmd;
		icmd->ipkt_prev = NULL;
	} else {
		/* this is the first/only pkt on the list */
		pptr->port_ipkt_list = icmd;
		icmd->ipkt_next = NULL;
		icmd->ipkt_prev = NULL;
	}
	mutex_exit(&pptr->port_mutex);
}
9540 9540
/*
 * Function:	fcp_transport
 *
 * Description: This function submits the Fibre Channel packet to the
 *		transport layer by calling fc_ulp_transport(). If
 *		fc_ulp_transport() fails the submission, the treatment
 *		depends on the value of the variable internal.
 *
 * Argument:	port_handle	fp/fctl port handle.
 *		*fpkt		Packet to submit to the transport layer.
 *		internal	Not zero when it's an internal packet.
 *
 * Return Value: FC_TRAN_BUSY
 *		FC_STATEC_BUSY
 *		FC_OFFLINE
 *		FC_LOGINREQ
 *		FC_DEVICE_BUSY
 *		FC_SUCCESS
 */
static int
fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
{
	int	rval;

	rval = fc_ulp_transport(port_handle, fpkt);
	if (rval == FC_SUCCESS) {
		return (rval);
	}

	/*
	 * LUN isn't marked BUSY or OFFLINE, so we got here to transport
	 * a command, if the underlying modules see that there is a state
	 * change, or if a port is OFFLINE, that means, that state change
	 * hasn't reached FCP yet, so re-queue the command for deferred
	 * submission.
	 */
	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
		/*
		 * Defer packet re-submission. Life hang is possible on
		 * internal commands if the port driver sends FC_STATEC_BUSY
		 * for ever, but that shouldn't happen in a good environment.
		 * Limiting re-transport for internal commands is probably a
		 * good idea..
		 * A race condition can happen when a port sees barrage of
		 * link transitions offline to online. If the FCTL has
		 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
		 * internal commands should be queued to do the discovery.
		 * The race condition is when an online comes and FCP starts
		 * its internal discovery and the link goes offline. It is
		 * possible that the statec_callback has not reached FCP
		 * and FCP is carrying on with its internal discovery.
		 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
		 * that the link has gone offline. At this point FCP should
		 * drop all the internal commands and wait for the
		 * statec_callback. It will be facilitated by incrementing
		 * port_link_cnt.
		 *
		 * For external commands, the (FC)pkt_timeout is decremented
		 * by the QUEUE Delay added by our driver, Care is taken to
		 * ensure that it doesn't become zero (zero means no timeout)
		 * If the time expires right inside driver queue itself,
		 * the watch thread will return it to the original caller
		 * indicating that the command has timed-out.
		 */
		if (internal) {
			char		*op;
			struct fcp_ipkt	*icmd;

			/* Map the opcode to a name for error reporting. */
			icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
			switch (icmd->ipkt_opcode) {
			case SCMD_REPORT_LUN:
				op = "REPORT LUN";
				break;

			case SCMD_INQUIRY:
				op = "INQUIRY";
				break;

			case SCMD_INQUIRY_PAGE83:
				op = "INQUIRY-83";
				break;

			default:
				op = "Internal SCSI COMMAND";
				break;
			}

			/*
			 * fcp_handle_ipkt_errors() decides whether the
			 * internal command can be retried; if so, report
			 * success to the caller.
			 */
			if (fcp_handle_ipkt_errors(icmd->ipkt_port,
			    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
				rval = FC_SUCCESS;
			}
		} else {
			struct fcp_pkt	*cmd;
			struct fcp_port	*pptr;

			cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
			cmd->cmd_state = FCP_PKT_IDLE;
			pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);

			if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
				/*
				 * The target driver asked us not to queue;
				 * hand the busy status straight back.
				 */
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_9, 0,
				    "fcp_transport: xport busy for pkt %p",
				    cmd->cmd_pkt);
				rval = FC_TRAN_BUSY;
			} else {
				fcp_queue_pkt(pptr, cmd);
				rval = FC_SUCCESS;
			}
		}
	}

	return (rval);
}
9657 9657
9658 9658 /*VARARGS3*/
9659 9659 static void
9660 9660 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9661 9661 {
9662 9662 char buf[256];
9663 9663 va_list ap;
9664 9664
9665 9665 if (dip == NULL) {
9666 9666 dip = fcp_global_dip;
9667 9667 }
9668 9668
9669 9669 va_start(ap, fmt);
9670 9670 (void) vsprintf(buf, fmt, ap);
9671 9671 va_end(ap);
9672 9672
9673 9673 scsi_log(dip, "fcp", level, buf);
9674 9674 }
9675 9675
9676 9676 /*
9677 9677 * This function retries NS registry of FC4 type.
9678 9678 * It assumes that fcp_mutex is held.
9679 9679 * The function does nothing if topology is not fabric
9680 9680 * So, the topology has to be set before this function can be called
9681 9681 */
9682 9682 static void
9683 9683 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9684 9684 {
9685 9685 int rval;
9686 9686
9687 9687 ASSERT(MUTEX_HELD(&pptr->port_mutex));
9688 9688
9689 9689 if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9690 9690 ((pptr->port_topology != FC_TOP_FABRIC) &&
9691 9691 (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9692 9692 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9693 9693 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9694 9694 }
9695 9695 return;
9696 9696 }
9697 9697 mutex_exit(&pptr->port_mutex);
9698 9698 rval = fcp_do_ns_registry(pptr, s_id);
9699 9699 mutex_enter(&pptr->port_mutex);
9700 9700
9701 9701 if (rval == 0) {
9702 9702 /* Registry successful. Reset flag */
9703 9703 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9704 9704 }
9705 9705 }
9706 9706
9707 9707 /*
9708 9708 * This function registers the ULP with the switch by calling transport i/f
9709 9709 */
9710 9710 static int
9711 9711 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9712 9712 {
9713 9713 fc_ns_cmd_t ns_cmd;
9714 9714 ns_rfc_type_t rfc;
9715 9715 uint32_t types[8];
9716 9716
9717 9717 /*
9718 9718 * Prepare the Name server structure to
9719 9719 * register with the transport in case of
9720 9720 * Fabric configuration.
9721 9721 */
9722 9722 bzero(&rfc, sizeof (rfc));
9723 9723 bzero(types, sizeof (types));
9724 9724
9725 9725 types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9726 9726 (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9727 9727
9728 9728 rfc.rfc_port_id.port_id = s_id;
9729 9729 bcopy(types, rfc.rfc_types, sizeof (types));
9730 9730
9731 9731 ns_cmd.ns_flags = 0;
9732 9732 ns_cmd.ns_cmd = NS_RFT_ID;
9733 9733 ns_cmd.ns_req_len = sizeof (rfc);
9734 9734 ns_cmd.ns_req_payload = (caddr_t)&rfc;
9735 9735 ns_cmd.ns_resp_len = 0;
9736 9736 ns_cmd.ns_resp_payload = NULL;
9737 9737
9738 9738 /*
9739 9739 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9740 9740 */
9741 9741 if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9742 9742 fcp_log(CE_WARN, pptr->port_dip,
9743 9743 "!ns_registry: failed name server registration");
9744 9744 return (1);
9745 9745 }
9746 9746
9747 9747 return (0);
9748 9748 }
9749 9749
/*
 * Function:	fcp_handle_port_attach
 *
 * Description: This function is called from fcp_port_attach() to attach a
 *		new port. This routine does the following:
 *
 *		1) Allocates an fcp_port structure and initializes it.
 *		2) Tries to register the new FC-4 (FCP) capability with the
 *		   name server.
 *		3) Kicks off the enumeration of the targets/luns visible
 *		   through this new port.  That is done by calling
 *		   fcp_statec_callback() if the port is online.
 *
 * Argument:	ulph		fp/fctl port handle.
 *		*pinfo		Port information.
 *		s_id		Port ID.
 *		instance	Device instance number for the local port
 *				(returned by ddi_get_instance()).
 *
 * Return Value: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context:	User and Kernel context.
 */
/*ARGSUSED*/
int
fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
    uint32_t s_id, int instance)
{
	int			res = DDI_FAILURE;
	scsi_hba_tran_t		*tran;
	int			mutex_initted = FALSE;
	int			hba_attached = FALSE;
	int			soft_state_linked = FALSE;
	int			event_bind = FALSE;
	struct fcp_port		*pptr;
	fc_portmap_t		*tmp_list = NULL;
	uint32_t		max_cnt, alloc_cnt;
	uchar_t			*boot_wwn = NULL;
	uint_t			nbytes;
	int			manual_cfg;

	/*
	 * this port instance attaching for the first time (or after
	 * being detached before)
	 */
	FCP_TRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);

	if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed"
		    "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
		    instance);
		return (res);
	}

	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
		/* this shouldn't happen */
		ddi_soft_state_free(fcp_softstate, instance);
		cmn_err(CE_WARN, "fcp: bad soft state");
		return (res);
	}

	(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);

	/*
	 * Make a copy of ulp_port_info as fctl allocates
	 * a temp struct.
	 */
	(void) fcp_cp_pinfo(pptr, pinfo);

	/*
	 * Check for manual_configuration_only property.
	 * Enable manual configuration if the property is
	 * set to 1, otherwise disable manual configuration.
	 */
	if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
	    MANUAL_CFG_ONLY,
	    -1)) != -1) {
		if (manual_cfg == 1) {
			char	*pathname;
			pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
			(void) ddi_pathname(pptr->port_dip, pathname);
			cmn_err(CE_NOTE,
			    "%s (%s%d) %s is enabled via %s.conf.",
			    pathname,
			    ddi_driver_name(pptr->port_dip),
			    ddi_get_instance(pptr->port_dip),
			    MANUAL_CFG_ONLY,
			    ddi_driver_name(pptr->port_dip));
			fcp_enable_auto_configuration = 0;
			kmem_free(pathname, MAXPATHLEN);
		}
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
	pptr->port_link_cnt = 1;
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
	pptr->port_id = s_id;
	pptr->port_instance = instance;
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state));
	pptr->port_state = FCP_STATE_INIT;
	if (pinfo->port_acc_attr == NULL) {
		/*
		 * The corresponding FCA doesn't support DMA at all
		 */
		pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
	}

	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state));

	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
		/*
		 * If FCA supports DMA in SCSI data phase, we need preallocate
		 * dma cookie, so stash the cookie size
		 */
		pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
		    pptr->port_data_dma_attr.dma_attr_sgllen;
	}

	/*
	 * The two mutexes of fcp_port are initialized. The variable
	 * mutex_initted is incremented to remember that fact. That variable
	 * is checked when the routine fails and the mutexes have to be
	 * destroyed.
	 */
	mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_initted++;

	/*
	 * The SCSI tran structure is allocated and initialized now.
	 */
	if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: scsi_hba_tran_alloc failed", instance);
		goto fail;
	}

	/* link in the transport structure then fill it in */
	pptr->port_tran = tran;
	tran->tran_hba_private		= pptr;
	tran->tran_tgt_init		= fcp_scsi_tgt_init;
	tran->tran_tgt_probe		= NULL;
	tran->tran_tgt_free		= fcp_scsi_tgt_free;
	tran->tran_start		= fcp_scsi_start;
	tran->tran_reset		= fcp_scsi_reset;
	tran->tran_abort		= fcp_scsi_abort;
	tran->tran_getcap		= fcp_scsi_getcap;
	tran->tran_setcap		= fcp_scsi_setcap;
	tran->tran_init_pkt		= NULL;
	tran->tran_destroy_pkt		= NULL;
	tran->tran_dmafree		= NULL;
	tran->tran_sync_pkt		= NULL;
	tran->tran_reset_notify		= fcp_scsi_reset_notify;
	tran->tran_get_bus_addr		= fcp_scsi_get_bus_addr;
	tran->tran_get_name		= fcp_scsi_get_name;
	tran->tran_clear_aca		= NULL;
	tran->tran_clear_task_set	= NULL;
	tran->tran_terminate_task	= NULL;
	tran->tran_get_eventcookie	= fcp_scsi_bus_get_eventcookie;
	tran->tran_add_eventcall	= fcp_scsi_bus_add_eventcall;
	tran->tran_remove_eventcall	= fcp_scsi_bus_remove_eventcall;
	tran->tran_post_event		= fcp_scsi_bus_post_event;
	tran->tran_quiesce		= NULL;
	tran->tran_unquiesce		= NULL;
	tran->tran_bus_reset		= NULL;
	tran->tran_bus_config		= fcp_scsi_bus_config;
	tran->tran_bus_unconfig		= fcp_scsi_bus_unconfig;
	tran->tran_bus_power		= NULL;
	tran->tran_interconnect_type	= INTERCONNECT_FABRIC;

	tran->tran_pkt_constructor	= fcp_kmem_cache_constructor;
	tran->tran_pkt_destructor	= fcp_kmem_cache_destructor;
	tran->tran_setup_pkt		= fcp_pkt_setup;
	tran->tran_teardown_pkt		= fcp_pkt_teardown;
	tran->tran_hba_len		= pptr->port_priv_pkt_len +
	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
	if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
		/*
		 * If FCA don't support DMA, then we use different vectors to
		 * minimize the effects on DMA code flow path
		 */
		tran->tran_start	= fcp_pseudo_start;
		tran->tran_init_pkt	= fcp_pseudo_init_pkt;
		tran->tran_destroy_pkt	= fcp_pseudo_destroy_pkt;
		tran->tran_sync_pkt	= fcp_pseudo_sync_pkt;
		tran->tran_dmafree	= fcp_pseudo_dmafree;
		tran->tran_setup_pkt	= NULL;
		tran->tran_teardown_pkt	= NULL;
		tran->tran_pkt_constructor = NULL;
		tran->tran_pkt_destructor = NULL;
		pptr->port_data_dma_attr = pseudo_fca_dma_attr;
	}

	/*
	 * Allocate an ndi event handle
	 */
	pptr->port_ndi_event_defs = (ndi_event_definition_t *)
	    kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);

	bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
	    sizeof (fcp_ndi_event_defs));

	(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
	    &pptr->port_ndi_event_hdl, NDI_SLEEP);

	pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
	pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
	pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;

	if (DEVI_IS_ATTACHING(pptr->port_dip) &&
	    (ndi_event_bind_set(pptr->port_ndi_event_hdl,
	    &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
		goto fail;
	}
	event_bind++;	/* Checked in fail case */

	if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
	    tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
	    != DDI_SUCCESS) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: scsi_hba_attach_setup failed", instance);
		goto fail;
	}
	hba_attached++;	/* Checked in fail case */

	pptr->port_mpxio = 0;
	if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
	    MDI_SUCCESS) {
		pptr->port_mpxio++;
	}

	/*
	 * The following code is putting the new port structure in the global
	 * list of ports and, if it is the first port to attach, it start the
	 * fcp_watchdog_tick.
	 *
	 * Why put this new port in the global before we are done attaching it?
	 * We are actually making the structure globally known before we are
	 * done attaching it. The reason for that is: because of the code that
	 * follows. At this point the resources to handle the port are
	 * allocated. This function is now going to do the following:
	 *
	 *	1) It is going to try to register with the name server
	 *	   advertising the new FCP capability of the port.
	 *	2) It is going to play the role of the fp/fctl layer by
	 *	   building a list of worldwide names reachable through this
	 *	   port and call itself on fcp_statec_callback().  That
	 *	   requires the port to be part of the global list.
	 */
	mutex_enter(&fcp_global_mutex);
	if (fcp_port_head == NULL) {
		fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
	}
	pptr->port_next = fcp_port_head;
	fcp_port_head = pptr;
	soft_state_linked++;

	if (fcp_watchdog_init++ == 0) {
		fcp_watchdog_tick = drv_sectohz(fcp_watchdog_timeout);
		fcp_watchdog_id = timeout(fcp_watch, NULL,
		    fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);

	/*
	 * Here an attempt is made to register with the name server, the new
	 * FCP capability. That is done using an RFT_ID to the name server.
	 * It is done synchronously. The function fcp_do_ns_registry()
	 * doesn't return till the name server responded.
	 * On failures, just ignore it for now and it will get retried during
	 * state change callbacks. We'll set a flag to show this failure
	 */
	if (fcp_do_ns_registry(pptr, s_id)) {
		mutex_enter(&pptr->port_mutex);
		pptr->port_state |= FCP_STATE_NS_REG_FAILED;
		mutex_exit(&pptr->port_mutex);
	} else {
		mutex_enter(&pptr->port_mutex);
		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
		mutex_exit(&pptr->port_mutex);
	}

	/*
	 * Lookup for boot WWN property
	 */
	if (modrootloaded != 1) {
		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
		    ddi_get_parent(pinfo->port_dip),
		    DDI_PROP_DONTPASS, OBP_BOOT_WWN,
		    &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
		    (nbytes == FC_WWN_SIZE)) {
			bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
		}
		if (boot_wwn) {
			ddi_prop_free(boot_wwn);
		}
	}

	/*
	 * Handle various topologies and link states.
	 */
	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
	case FC_STATE_OFFLINE:

		/*
		 * we're attaching a port where the link is offline
		 *
		 * Wait for ONLINE, at which time a state
		 * change will cause a statec_callback
		 *
		 * in the mean time, do not do anything
		 */
		res = DDI_SUCCESS;
		pptr->port_state |= FCP_STATE_OFFLINE;
		break;

	case FC_STATE_ONLINE: {
		if (pptr->port_topology == FC_TOP_UNKNOWN) {
			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
			res = DDI_SUCCESS;
			break;
		}
		/*
		 * discover devices and create nodes (a private
		 * loop or point-to-point)
		 */
		ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);

		/*
		 * At this point we are going to build a list of all the ports
		 * that can be reached through this local port. It looks like
		 * we cannot handle more than FCP_MAX_DEVICES per local port
		 * (128).
		 */
		if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
		    sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
		    KM_NOSLEEP)) == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp%d: failed to allocate portmap",
			    instance);
			goto fail;
		}

		/*
		 * fc_ulp_getportmap() is going to provide us with the list of
		 * remote ports in the buffer we just allocated. The way the
		 * list is going to be retrieved depends on the topology.
		 * However, if we are connected to a Fabric, a name server
		 * request may be sent to get the list of FCP capable ports.
		 * It should be noted that is the case the request is
		 * synchronous. This means we are stuck here till the name
		 * server replies. A lot of things can change during that time
		 * and including, may be, being called on
		 * fcp_statec_callback() for different reasons. I'm not sure
		 * the code can handle that.
		 */
		max_cnt = FCP_MAX_DEVICES;
		alloc_cnt = FCP_MAX_DEVICES;
		if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
		    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
		    FC_SUCCESS) {
			caddr_t msg;

			(void) fc_ulp_error(res, &msg);

			/*
			 * this just means the transport is
			 * busy perhaps building a portmap so,
			 * for now, succeed this port attach
			 * when the transport has a new map,
			 * it'll send us a state change then
			 */
			fcp_log(CE_WARN, pptr->port_dip,
			    "!failed to get port map : %s", msg);

			res = DDI_SUCCESS;
			break;	/* go return result */
		}
		if (max_cnt > alloc_cnt) {
			alloc_cnt = max_cnt;
		}

		/*
		 * We are now going to call fcp_statec_callback() ourselves.
		 * By issuing this call we are trying to kick off the enumera-
		 * tion process.
		 */
		/*
		 * let the state change callback do the SCSI device
		 * discovery and create the devinfos
		 */
		fcp_statec_callback(ulph, pptr->port_fp_handle,
		    pptr->port_phys_state, pptr->port_topology, tmp_list,
		    max_cnt, pptr->port_id);

		res = DDI_SUCCESS;
		break;
	}

	default:
		/* unknown port state */
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: invalid port state at attach=0x%x",
		    instance, pptr->port_phys_state);

		mutex_enter(&pptr->port_mutex);
		pptr->port_phys_state = FCP_STATE_OFFLINE;
		mutex_exit(&pptr->port_mutex);

		res = DDI_SUCCESS;
		break;
	}

	/* free temp list if used */
	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	/* note the attach time */
	pptr->port_attach_time = ddi_get_lbolt64();

	/* all done */
	return (res);

	/* a failure we have to clean up after */
fail:
	fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");

	if (soft_state_linked) {
		/* remove this fcp_port from the linked list */
		(void) fcp_soft_state_unlink(pptr);
	}

	/* unbind and free event set */
	if (pptr->port_ndi_event_hdl) {
		if (event_bind) {
			(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
			    &pptr->port_ndi_events, NDI_SLEEP);
		}
		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
	}

	if (pptr->port_ndi_event_defs) {
		(void) kmem_free(pptr->port_ndi_event_defs,
		    sizeof (fcp_ndi_event_defs));
	}

	/*
	 * Clean up mpxio stuff
	 */
	if (pptr->port_mpxio) {
		(void) mdi_phci_unregister(pptr->port_dip, 0);
		pptr->port_mpxio--;
	}

	/* undo SCSI HBA setup */
	if (hba_attached) {
		(void) scsi_hba_detach(pptr->port_dip);
	}
	if (pptr->port_tran != NULL) {
		scsi_hba_tran_free(pptr->port_tran);
	}

	mutex_enter(&fcp_global_mutex);

	/*
	 * We check soft_state_linked, because it is incremented right before
	 * we call increment fcp_watchdog_init. Therefore, we know if
	 * soft_state_linked is still FALSE, we do not want to decrement
	 * fcp_watchdog_init or possibly call untimeout.
	 */

	if (soft_state_linked) {
		if (--fcp_watchdog_init == 0) {
			timeout_id_t	tid = fcp_watchdog_id;

			mutex_exit(&fcp_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&fcp_global_mutex);
		}
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	if (mutex_initted) {
		mutex_destroy(&pptr->port_mutex);
		mutex_destroy(&pptr->port_pkt_mutex);
	}

	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	/* this makes pptr invalid */
	ddi_soft_state_free(fcp_softstate, instance);

	return (DDI_FAILURE);
}
10252 10251
10253 10252
/*
 * Function: fcp_handle_port_detach
 *
 * Description: Quiesces a port for suspend, power-down or full detach.
 *		The port state is marked with 'flag', outstanding
 *		reconfig/internal-packet activity is drained, the port is
 *		taken offline and, for a full detach, the per-port
 *		resources are torn down via fcp_cleanup_port().
 *
 * Argument:	*pptr		FCP port being quiesced.
 *		flag		FCP_STATE_DETACHING, or a suspend/power-down
 *				state flag.
 *		instance	Instance number, used to free the soft state
 *				on a full detach.
 *
 * Return Value: FC_SUCCESS	Port quiesced (and, for a detach, freed).
 *		 FC_FAILURE	Port is busy (MDI active, or reconfig/ipkt
 *				work did not drain in time).
 */
static int
fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
{
	int count = 0;

	mutex_enter(&pptr->port_mutex);

	/*
	 * if the port is powered down or suspended, nothing else
	 * to do; just return.
	 */
	if (flag != FCP_STATE_DETACHING) {
		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
		    FCP_STATE_SUSPENDED)) {
			pptr->port_state |= flag;
			mutex_exit(&pptr->port_mutex);
			return (FC_SUCCESS);
		}
	}

	/* MDI is still referencing this port; it cannot go away now. */
	if (pptr->port_state & FCP_STATE_IN_MDI) {
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_handle_port_detach: port is detaching");

	pptr->port_state |= flag;

	/*
	 * Wait for any ongoing reconfig/ipkt to complete, that
	 * ensures the freeing to targets/luns is safe.
	 * No more ref to this port should happen from statec/ioctl
	 * after that as it was removed from the global port list.
	 */
	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		/*
		 * Let's give sufficient time for reconfig/ipkt
		 * to complete.
		 */
		if (count++ >= FCP_ICMD_DEADLINE) {
			break;
		}
		/* Drop the lock for one second so the busy work can drain. */
		mutex_exit(&pptr->port_mutex);
		delay(drv_sectohz(1));
		mutex_enter(&pptr->port_mutex);
	}

	/*
	 * if the driver is still busy then fail to
	 * suspend/power down.
	 */
	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		/* Undo the state flag set above; the request failed. */
		pptr->port_state &= ~flag;
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	if (flag == FCP_STATE_DETACHING) {
		/*
		 * Unlink this port from the global soft-state list so no
		 * new references can be taken on it.
		 */
		pptr = fcp_soft_state_unlink(pptr);
		ASSERT(pptr != NULL);
	}

	/* Mark the port offline and cancel any onlining in progress. */
	pptr->port_link_cnt++;
	pptr->port_state |= FCP_STATE_OFFLINE;
	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);

	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
	    FCP_CAUSE_LINK_DOWN);
	mutex_exit(&pptr->port_mutex);

	/* kill watch dog timer if we're the last */
	mutex_enter(&fcp_global_mutex);
	if (--fcp_watchdog_init == 0) {
		timeout_id_t tid = fcp_watchdog_id;

		/* untimeout() may sleep; never call it with the lock held. */
		mutex_exit(&fcp_global_mutex);
		(void) untimeout(tid);
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	/* clean up the port structures */
	if (flag == FCP_STATE_DETACHING) {
		fcp_cleanup_port(pptr, instance);
	}

	return (FC_SUCCESS);
}
10346 10345
10347 10346
/*
 * Function: fcp_cleanup_port
 *
 * Description: Releases every per-port resource set up at attach time:
 *		NDI event set and handle, target/LUN structures, mpxio
 *		pHCI registration, SCSA transport, mutexes and finally the
 *		soft state itself.  The teardown order mirrors the attach
 *		order in reverse.
 *
 * Argument:	*pptr		FCP port to destroy.  Invalid on return
 *				(its soft state is freed here).
 *		instance	Instance number for ddi_soft_state_free().
 *
 * Return Value: None
 */
static void
fcp_cleanup_port(struct fcp_port *pptr, int instance)
{
	ASSERT(pptr != NULL);

	/* unbind and free event set */
	if (pptr->port_ndi_event_hdl) {
		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
		    &pptr->port_ndi_events, NDI_SLEEP);
		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
	}

	if (pptr->port_ndi_event_defs) {
		(void) kmem_free(pptr->port_ndi_event_defs,
		    sizeof (fcp_ndi_event_defs));
	}

	/* free the lun/target structures and devinfos */
	fcp_free_targets(pptr);

	/*
	 * Clean up mpxio stuff
	 */
	if (pptr->port_mpxio) {
		(void) mdi_phci_unregister(pptr->port_dip, 0);
		pptr->port_mpxio--;
	}

	/* clean up SCSA stuff */
	(void) scsi_hba_detach(pptr->port_dip);
	if (pptr->port_tran != NULL) {
		scsi_hba_tran_free(pptr->port_tran);
	}

#ifdef	KSTATS_CODE
	/* clean up kstats */
	if (pptr->fcp_ksp != NULL) {
		kstat_delete(pptr->fcp_ksp);
	}
#endif

	/* clean up soft state mutexes/condition variables */
	mutex_destroy(&pptr->port_mutex);
	mutex_destroy(&pptr->port_pkt_mutex);

	/* all done with soft state */
	ddi_soft_state_free(fcp_softstate, instance);
}
10396 10395
10397 10396 /*
10398 10397 * Function: fcp_kmem_cache_constructor
10399 10398 *
10400 10399 * Description: This function allocates and initializes the resources required
10401 10400 * to build a scsi_pkt structure the target driver. The result
10402 10401 * of the allocation and initialization will be cached in the
10403 10402 * memory cache. As DMA resources may be allocated here, that
10404 10403 * means DMA resources will be tied up in the cache manager.
10405 10404 * This is a tradeoff that has been made for performance reasons.
10406 10405 *
10407 10406 * Argument: *buf Memory to preinitialize.
10408 10407 * *arg FCP port structure (fcp_port).
10409 10408 * kmflags Value passed to kmem_cache_alloc() and
10410 10409 * propagated to the constructor.
10411 10410 *
10412 10411 * Return Value: 0 Allocation/Initialization was successful.
10413 10412 * -1 Allocation or Initialization failed.
10414 10413 *
10415 10414 *
10416 10415 * If the returned value is 0, the buffer is initialized like this:
10417 10416 *
10418 10417 * +================================+
10419 10418 * +----> | struct scsi_pkt |
10420 10419 * | | |
10421 10420 * | +--- | pkt_ha_private |
10422 10421 * | | | |
10423 10422 * | | +================================+
10424 10423 * | |
10425 10424 * | | +================================+
10426 10425 * | +--> | struct fcp_pkt | <---------+
10427 10426 * | | | |
10428 10427 * +----- | cmd_pkt | |
10429 10428 * | cmd_fp_pkt | ---+ |
10430 10429 * +-------->| cmd_fcp_rsp[] | | |
10431 10430 * | +--->| cmd_fcp_cmd[] | | |
10432 10431 * | | |--------------------------------| | |
10433 10432 * | | | struct fc_packet | <--+ |
10434 10433 * | | | | |
10435 10434 * | | | pkt_ulp_private | ----------+
10436 10435 * | | | pkt_fca_private | -----+
10437 10436 * | | | pkt_data_cookie | ---+ |
10438 10437 * | | | pkt_cmdlen | | |
10439 10438 * | |(a) | pkt_rsplen | | |
10440 10439 * | +----| .......... pkt_cmd ........... | ---|-|---------------+
10441 10440 * | (b) | pkt_cmd_cookie | ---|-|----------+ |
10442 10441 * +---------| .......... pkt_resp .......... | ---|-|------+ | |
10443 10442 * | pkt_resp_cookie | ---|-|--+ | | |
10444 10443 * | pkt_cmd_dma | | | | | | |
10445 10444 * | pkt_cmd_acc | | | | | | |
10446 10445 * +================================+ | | | | | |
10447 10446 * | dma_cookies | <--+ | | | | |
10448 10447 * | | | | | | |
10449 10448 * +================================+ | | | | |
10450 10449 * | fca_private | <----+ | | | |
10451 10450 * | | | | | |
10452 10451 * +================================+ | | | |
10453 10452 * | | | |
10454 10453 * | | | |
10455 10454 * +================================+ (d) | | | |
10456 10455 * | fcp_resp cookies | <-------+ | | |
10457 10456 * | | | | |
10458 10457 * +================================+ | | |
10459 10458 * | | |
10460 10459 * +================================+ (d) | | |
10461 10460 * | fcp_resp | <-----------+ | |
10462 10461 * | (DMA resources associated) | | |
10463 10462 * +================================+ | |
10464 10463 * | |
10465 10464 * | |
10466 10465 * | |
10467 10466 * +================================+ (c) | |
10468 10467 * | fcp_cmd cookies | <---------------+ |
10469 10468 * | | |
10470 10469 * +================================+ |
10471 10470 * |
10472 10471 * +================================+ (c) |
10473 10472 * | fcp_cmd | <--------------------+
10474 10473 * | (DMA resources associated) |
10475 10474 * +================================+
10476 10475 *
10477 10476 * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10478 10477 * (b) Only if DMA is NOT used for the FCP_RESP buffer
10479 10478 * (c) Only if DMA is used for the FCP_CMD buffer.
10480 10479 * (d) Only if DMA is used for the FCP_RESP buffer
10481 10480 */
static int
fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
    int kmflags)
{
	struct fcp_pkt	*cmd;
	struct fcp_port	*pptr;
	fc_packet_t	*fpkt;

	pptr = (struct fcp_port *)tran->tran_hba_private;
	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
	bzero(cmd, tran->tran_hba_len);

	/*
	 * Cross-link the scsi_pkt and the fcp_pkt living in its
	 * pkt_ha_private area (see the layout diagram above).
	 */
	cmd->cmd_pkt = pkt;
	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
	cmd->cmd_fp_pkt = fpkt;

	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
	/*
	 * The FCA private area starts right after the fcp_pkt and its
	 * data-cookie array (port_dmacookie_sz bytes).
	 */
	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);

	/* The data-cookie array immediately follows the fcp_pkt itself. */
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
	    sizeof (struct fcp_pkt));

	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;

	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
		/*
		 * The underlying HBA doesn't want to DMA the fcp_cmd or
		 * fcp_resp. The transfer of information will be done by
		 * bcopy.
		 * The naming of the flags (that is actually a value) is
		 * unfortunate. FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
		 * DMA" but instead "NO DMA".
		 */
		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
	} else {
		/*
		 * The underlying HBA will dma the fcp_cmd buffer and fcp_resp
		 * buffer. A buffer is allocated for each one the ddi_dma_*
		 * interfaces.
		 */
		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
			return (-1);
		}
	}

	return (0);
}
10535 10534
10536 10535 /*
10537 10536 * Function: fcp_kmem_cache_destructor
10538 10537 *
10539 10538 * Description: Called by the destructor of the cache managed by SCSA.
10540 10539 * All the resources pre-allocated in fcp_pkt_constructor
10541 10540 * and the data also pre-initialized in fcp_pkt_constructor
10542 10541 * are freed and uninitialized here.
10543 10542 *
10544 10543 * Argument: *buf Memory to uninitialize.
10545 10544 * *arg FCP port structure (fcp_port).
10546 10545 *
10547 10546 * Return Value: None
10548 10547 *
10549 10548 * Context: kernel
10550 10549 */
10551 10550 static void
10552 10551 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10553 10552 {
10554 10553 struct fcp_pkt *cmd;
10555 10554 struct fcp_port *pptr;
10556 10555
10557 10556 pptr = (struct fcp_port *)(tran->tran_hba_private);
10558 10557 cmd = pkt->pkt_ha_private;
10559 10558
10560 10559 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10561 10560 /*
10562 10561 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10563 10562 * buffer and DMA resources allocated to do so are released.
10564 10563 */
10565 10564 fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10566 10565 }
10567 10566 }
10568 10567
10569 10568 /*
10570 10569 * Function: fcp_alloc_cmd_resp
10571 10570 *
10572 10571 * Description: This function allocated an FCP_CMD and FCP_RESP buffer that
10573 10572 * will be DMAed by the HBA. The buffer is allocated applying
10574 10573 * the DMA requirements for the HBA. The buffers allocated will
10575 10574 * also be bound. DMA resources are allocated in the process.
10576 10575 * They will be released by fcp_free_cmd_resp().
10577 10576 *
10578 10577 * Argument: *pptr FCP port.
10579 10578 * *fpkt fc packet for which the cmd and resp packet should be
10580 10579 * allocated.
10581 10580 * flags Allocation flags.
10582 10581 *
10583 10582 * Return Value: FC_FAILURE
10584 10583 * FC_SUCCESS
10585 10584 *
10586 10585 * Context: User or Kernel context only if flags == KM_SLEEP.
10587 10586 * Interrupt context if the KM_SLEEP is not specified.
10588 10587 */
10589 10588 static int
10590 10589 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10591 10590 {
10592 10591 int rval;
10593 10592 int cmd_len;
10594 10593 int resp_len;
10595 10594 ulong_t real_len;
10596 10595 int (*cb) (caddr_t);
10597 10596 ddi_dma_cookie_t pkt_cookie;
10598 10597 ddi_dma_cookie_t *cp;
10599 10598 uint32_t cnt;
10600 10599
10601 10600 cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10602 10601
10603 10602 cmd_len = fpkt->pkt_cmdlen;
10604 10603 resp_len = fpkt->pkt_rsplen;
10605 10604
10606 10605 ASSERT(fpkt->pkt_cmd_dma == NULL);
10607 10606
10608 10607 /* Allocation of a DMA handle used in subsequent calls. */
10609 10608 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10610 10609 cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10611 10610 return (FC_FAILURE);
10612 10611 }
10613 10612
10614 10613 /* A buffer is allocated that satisfies the DMA requirements. */
10615 10614 rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10616 10615 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10617 10616 (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10618 10617
10619 10618 if (rval != DDI_SUCCESS) {
10620 10619 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10621 10620 return (FC_FAILURE);
10622 10621 }
10623 10622
10624 10623 if (real_len < cmd_len) {
10625 10624 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10626 10625 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10627 10626 return (FC_FAILURE);
10628 10627 }
10629 10628
10630 10629 /* The buffer allocated is DMA bound. */
10631 10630 rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10632 10631 fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10633 10632 cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10634 10633
10635 10634 if (rval != DDI_DMA_MAPPED) {
10636 10635 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10637 10636 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10638 10637 return (FC_FAILURE);
10639 10638 }
10640 10639
10641 10640 if (fpkt->pkt_cmd_cookie_cnt >
10642 10641 pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10643 10642 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10644 10643 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10645 10644 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10646 10645 return (FC_FAILURE);
10647 10646 }
10648 10647
10649 10648 ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10650 10649
10651 10650 /*
10652 10651 * The buffer where the scatter/gather list is going to be built is
10653 10652 * allocated.
10654 10653 */
10655 10654 cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10656 10655 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10657 10656 KM_NOSLEEP);
10658 10657
10659 10658 if (cp == NULL) {
10660 10659 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10661 10660 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10662 10661 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10663 10662 return (FC_FAILURE);
10664 10663 }
10665 10664
10666 10665 /*
10667 10666 * The scatter/gather list for the buffer we just allocated is built
10668 10667 * here.
10669 10668 */
10670 10669 *cp = pkt_cookie;
10671 10670 cp++;
10672 10671
10673 10672 for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10674 10673 ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10675 10674 &pkt_cookie);
10676 10675 *cp = pkt_cookie;
10677 10676 }
10678 10677
10679 10678 ASSERT(fpkt->pkt_resp_dma == NULL);
10680 10679 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10681 10680 cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10682 10681 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10683 10682 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10684 10683 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10685 10684 return (FC_FAILURE);
10686 10685 }
10687 10686
10688 10687 rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10689 10688 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10690 10689 (caddr_t *)&fpkt->pkt_resp, &real_len,
10691 10690 &fpkt->pkt_resp_acc);
10692 10691
10693 10692 if (rval != DDI_SUCCESS) {
10694 10693 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10695 10694 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10696 10695 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10697 10696 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10698 10697 kmem_free(fpkt->pkt_cmd_cookie,
10699 10698 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10700 10699 return (FC_FAILURE);
10701 10700 }
10702 10701
10703 10702 if (real_len < resp_len) {
10704 10703 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10705 10704 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10706 10705 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10707 10706 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10708 10707 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10709 10708 kmem_free(fpkt->pkt_cmd_cookie,
10710 10709 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10711 10710 return (FC_FAILURE);
10712 10711 }
10713 10712
10714 10713 rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10715 10714 fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10716 10715 cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10717 10716
10718 10717 if (rval != DDI_DMA_MAPPED) {
10719 10718 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10720 10719 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10721 10720 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10722 10721 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10723 10722 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10724 10723 kmem_free(fpkt->pkt_cmd_cookie,
10725 10724 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10726 10725 return (FC_FAILURE);
10727 10726 }
10728 10727
10729 10728 if (fpkt->pkt_resp_cookie_cnt >
10730 10729 pptr->port_resp_dma_attr.dma_attr_sgllen) {
10731 10730 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10732 10731 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10733 10732 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10734 10733 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10735 10734 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10736 10735 kmem_free(fpkt->pkt_cmd_cookie,
10737 10736 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10738 10737 return (FC_FAILURE);
10739 10738 }
10740 10739
10741 10740 ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10742 10741
10743 10742 cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10744 10743 fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10745 10744 KM_NOSLEEP);
10746 10745
10747 10746 if (cp == NULL) {
10748 10747 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10749 10748 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10750 10749 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10751 10750 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10752 10751 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10753 10752 kmem_free(fpkt->pkt_cmd_cookie,
10754 10753 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10755 10754 return (FC_FAILURE);
10756 10755 }
10757 10756
10758 10757 *cp = pkt_cookie;
10759 10758 cp++;
10760 10759
10761 10760 for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10762 10761 ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10763 10762 &pkt_cookie);
10764 10763 *cp = pkt_cookie;
10765 10764 }
10766 10765
10767 10766 return (FC_SUCCESS);
10768 10767 }
10769 10768
/*
 * Function: fcp_free_cmd_resp
 *
 * Description: This function releases the FCP_CMD and FCP_RESP buffer
 *		allocated by fcp_alloc_cmd_resp() and all the resources
 *		associated with them. That includes the DMA resources and the
 *		buffer allocated for the cookies of each one of them.
 *
 * Argument:	*pptr		FCP port context.
 *		*fpkt		fc packet containing the cmd and resp packet
 *				to be released.
 *
 * Return Value: None
 *
 * Context:	Interrupt, User and Kernel context.
 */
/* ARGSUSED */
static void
fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
{
	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);

	/*
	 * The individual NULL checks below are defensive: the ASSERT above
	 * documents the expected state, but on non-DEBUG kernels a partially
	 * set-up packet is still torn down gracefully.
	 */
	if (fpkt->pkt_resp_dma) {
		/* Unbind before freeing the memory and the handle. */
		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
	}

	if (fpkt->pkt_resp_cookie) {
		kmem_free(fpkt->pkt_resp_cookie,
		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
		/* Clear the pointer to guard against double free. */
		fpkt->pkt_resp_cookie = NULL;
	}

	if (fpkt->pkt_cmd_dma) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
	}

	if (fpkt->pkt_cmd_cookie) {
		kmem_free(fpkt->pkt_cmd_cookie,
		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
		fpkt->pkt_cmd_cookie = NULL;
	}
}
10816 10815
10817 10816
/*
 * called by the transport to do our own target initialization
 *
 * can acquire and release the global mutex
 *
 * Looks up the fcp_lun matching the child's port-WWN and LUN-number
 * device properties and links the scsi_device to it.  Returns
 * DDI_NOT_WELL_FORMED when either property is missing or malformed,
 * DDI_FAILURE when no matching LUN exists, DDI_SUCCESS otherwise.
 */
/* ARGSUSED */
static int
fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t			*bytes;
	uint_t			nbytes;
	uint16_t		lun_num;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_port	*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
	    FCP_BUF_LEVEL_8, 0,
	    "fcp_phys_tgt_init: called for %s (instance %d)",
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* no port WWN property */
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d): bytes=%p nbytes=%x",
		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
		    nbytes);

		/* lookup may have succeeded with the wrong size; free it */
		if (bytes != NULL) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}

		return (DDI_NOT_WELL_FORMED);
	}
	ASSERT(bytes != NULL);

	/* 0xFFFF is the "property missing" default, not a usable LUN. */
	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	/* Resolve (WWN, LUN) to the fcp_lun under the port lock. */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/*
	 * Link the LUN and the scsi_device together; lun_tgt_count tracks
	 * how many tgt_init calls are outstanding (undone in
	 * fcp_scsi_tgt_free()).
	 */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
10908 10907
/*
 * mpxio (virtual path) counterpart of fcp_phys_tgt_init(): same
 * WWN/LUN property lookup and linking, but the child must carry mpxio
 * path information (sd_pathinfo) to be considered well formed.
 */
/*ARGSUSED*/
static int
fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t			*bytes;
	uint_t			nbytes;
	uint16_t		lun_num;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_port	*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;
	child_info_t		*cip;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);

	/* A virtual child without pathinfo cannot be initialized. */
	cip = (child_info_t *)sd->sd_pathinfo;
	if (cip == NULL) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		return (DDI_NOT_WELL_FORMED);
	}

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* lookup may have succeeded with the wrong size; free it */
		if (bytes) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}
		return (DDI_NOT_WELL_FORMED);
	}

	ASSERT(bytes != NULL);

	/* 0xFFFF is the "property missing" default, not a usable LUN. */
	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	/* Resolve (WWN, LUN) to the fcp_lun under the port lock. */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/*
	 * Link the LUN and the scsi_device together; lun_tgt_count tracks
	 * how many tgt_init calls are outstanding (undone in
	 * fcp_scsi_tgt_free()).
	 */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
10999 10998
11000 10999
11001 11000 /*
11002 11001 * called by the transport to do our own target initialization
11003 11002 *
11004 11003 * can acquire and release the global mutex
11005 11004 */
11006 11005 /* ARGSUSED */
11007 11006 static int
11008 11007 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
11009 11008 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
11010 11009 {
11011 11010 struct fcp_port *pptr = (struct fcp_port *)
11012 11011 hba_tran->tran_hba_private;
11013 11012 int rval;
11014 11013
11015 11014 ASSERT(pptr != NULL);
11016 11015
11017 11016 /*
11018 11017 * Child node is getting initialized. Look at the mpxio component
11019 11018 * type on the child device to see if this device is mpxio managed
11020 11019 * or not.
11021 11020 */
11022 11021 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
11023 11022 rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11024 11023 } else {
11025 11024 rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11026 11025 }
11027 11026
11028 11027 return (rval);
11029 11028 }
11030 11029
11031 11030
/*
 * Undo fcp_phys_tgt_init()/fcp_virt_tgt_init(): drop the reference the
 * init routine took on the LUN and unlink the scsi_device from it.
 *
 * NOTE(review): the FCP_DTRACE below goes through LUN_PORT, which
 * dereferences plun, before the plun == NULL check — presumably safe
 * because FCP_DTRACE compiles away on non-DEBUG kernels; confirm.
 */
/* ARGSUSED */
static void
fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct fcp_lun	*plun = scsi_device_hba_private_get(sd);
	struct fcp_tgt	*ptgt;

	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	/* Nothing was linked to this scsi_device; nothing to undo. */
	if (plun == NULL) {
		return;
	}
	ptgt = plun->lun_tgt;

	ASSERT(ptgt != NULL);

	mutex_enter(&ptgt->tgt_mutex);
	ASSERT(plun->lun_tgt_count > 0);

	/* Clear the TGT_INIT state when the last reference goes away. */
	if (--plun->lun_tgt_count == 0) {
		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
	}
	plun->lun_sd = NULL;
	mutex_exit(&ptgt->tgt_mutex);
}
11062 11061
/*
 * Function: fcp_scsi_start
 *
 * Description: This function is called by the target driver to request a
 *		command to be sent.  It is the tran_start(9E) entry point:
 *		depending on port/target/LUN state the packet is either
 *		transported immediately, queued internally for later retry,
 *		polled synchronously, or rejected.
 *
 * Argument:	*ap		SCSI address of the device.
 *		*pkt		SCSI packet containing the cmd to send.
 *
 * Return Value: TRAN_ACCEPT
 *		 TRAN_BUSY
 *		 TRAN_BADPKT
 *		 TRAN_FATAL_ERROR
 */
static int
fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct fcp_port *pptr = ADDR2FCP(ap);
	struct fcp_lun *plun = ADDR2LUN(ap);
	struct fcp_pkt *cmd = PKT2CMD(pkt);
	struct fcp_tgt *ptgt = plun->lun_tgt;
	int rval;

	/* ensure command isn't already issued */
	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_9, 0,
	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);

	/*
	 * It is strange that we enter the fcp_port mutex and the target
	 * mutex to check the lun state (which has a mutex of its own).
	 */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * If the device is offline and is not in the process of coming
	 * online, fail the request.
	 */

	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
	    !(plun->lun_state & FCP_LUN_ONLINING)) {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* no port device attached: the device is truly gone */
		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
			pkt->pkt_reason = CMD_DEV_GONE;
		}

		return (TRAN_FATAL_ERROR);
	}
	/* propagate the target driver's timeout to the FC packet */
	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;

	/*
	 * If we are suspended, kernel is trying to dump, so don't
	 * block, fail or defer requests - send them down right away.
	 * NOTE: If we are in panic (i.e. trying to dump), we can't
	 * assume we have been suspended.  There is hardware such as
	 * the v880 that doesn't do PM.  Thus, the check for
	 * ddi_in_panic.
	 *
	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
	 * of changing.  So, if we can queue the packet, do it.  Eventually,
	 * either the device will have gone away or changed and we can fail
	 * the request, or we can proceed if the device didn't change.
	 *
	 * If the pd in the target or the packet is NULL it's probably
	 * because the device has gone away, we allow the request to be
	 * put on the internal queue here in case the device comes back within
	 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
	 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
	 * could be NULL because the device was disappearing during or since
	 * packet initialization.
	 */

	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
	    (ptgt->tgt_pd_handle == NULL) ||
	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
		/*
		 * If ((LUN is busy AND
		 *	LUN not suspended AND
		 *	The system is not in panic state) OR
		 *	(The port is coming up))
		 *
		 * We check to see if the any of the flags FLAG_NOINTR or
		 * FLAG_NOQUEUE is set.  If one of them is set the value
		 * returned will be TRAN_BUSY.  If not, the request is queued.
		 */
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* see if using interrupts is allowed (so queueing'll work) */
		if (pkt->pkt_flags & FLAG_NOINTR) {
			pkt->pkt_resid = 0;
			return (TRAN_BUSY);
		}
		if (pkt->pkt_flags & FLAG_NOQUEUE) {
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_9, 0,
			    "fcp_scsi_start: lun busy for pkt %p", pkt);
			return (TRAN_BUSY);
		}
#ifdef	DEBUG
		mutex_enter(&pptr->port_pkt_mutex);
		pptr->port_npkts++;
		mutex_exit(&pptr->port_pkt_mutex);
#endif	/* DEBUG */

		/* got queue up the pkt for later */
		fcp_queue_pkt(pptr, cmd);
		return (TRAN_ACCEPT);
	}
	cmd->cmd_state = FCP_PKT_ISSUED;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * Now that we released the mutexes, what was protected by them can
	 * change.
	 */

	/*
	 * If there is a reconfiguration in progress, wait for it to complete.
	 */
	fcp_reconfig_wait(pptr);

	/* deadline for the watchdog; 0 means "no timeout" */
	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
	    pkt->pkt_time : 0;

	/* prepare the packet */

	fcp_prepare_pkt(pptr, cmd, plun);

	if (cmd->cmd_pkt->pkt_time) {
		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
	} else {
		/*
		 * No timeout requested: default to 5*60*60 (presumably
		 * seconds, i.e. 5 hours — matching pkt_time units;
		 * TODO confirm).
		 */
		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
	}

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
		return (fcp_dopoll(pptr, cmd));
	}

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts++;
	mutex_exit(&pptr->port_pkt_mutex);
#endif	/* DEBUG */

	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
	if (rval == FC_SUCCESS) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
		return (TRAN_ACCEPT);
	}

	/* transport refused the packet; undo the issued state */
	cmd->cmd_state = FCP_PKT_IDLE;

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif	/* DEBUG */

	/*
	 * For lack of clearer definitions, choose
	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
	 */

	if (rval == FC_TRAN_BUSY) {
		pkt->pkt_resid = 0;
		rval = TRAN_BUSY;
	} else {
		mutex_enter(&ptgt->tgt_mutex);
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			child_info_t	*cip;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fcp_transport failed 2 for %x: %x; dip=%p",
			    plun->lun_tgt->tgt_d_id, rval, cip);

			rval = TRAN_FATAL_ERROR;
		} else {
			if (pkt->pkt_flags & FLAG_NOQUEUE) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_9, 0,
				    "fcp_scsi_start: FC_BUSY for pkt %p",
				    pkt);
				rval = TRAN_BUSY;
			} else {
				/* LUN still online: retry from the queue */
				rval = TRAN_ACCEPT;
				fcp_queue_pkt(pptr, cmd);
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	}

	return (rval);
}
11278 11277
11279 11278 /*
11280 11279 * called by the transport to abort a packet
11281 11280 */
11282 11281 /*ARGSUSED*/
11283 11282 static int
11284 11283 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11285 11284 {
11286 11285 int tgt_cnt;
11287 11286 struct fcp_port *pptr = ADDR2FCP(ap);
11288 11287 struct fcp_lun *plun = ADDR2LUN(ap);
11289 11288 struct fcp_tgt *ptgt = plun->lun_tgt;
11290 11289
11291 11290 if (pkt == NULL) {
11292 11291 if (ptgt) {
11293 11292 mutex_enter(&ptgt->tgt_mutex);
11294 11293 tgt_cnt = ptgt->tgt_change_cnt;
11295 11294 mutex_exit(&ptgt->tgt_mutex);
11296 11295 fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11297 11296 return (TRUE);
11298 11297 }
11299 11298 }
11300 11299 return (FALSE);
11301 11300 }
11302 11301
11303 11302
11304 11303 /*
11305 11304 * Perform reset
11306 11305 */
11307 11306 int
11308 11307 fcp_scsi_reset(struct scsi_address *ap, int level)
11309 11308 {
11310 11309 int rval = 0;
11311 11310 struct fcp_port *pptr = ADDR2FCP(ap);
11312 11311 struct fcp_lun *plun = ADDR2LUN(ap);
11313 11312 struct fcp_tgt *ptgt = plun->lun_tgt;
11314 11313
11315 11314 if (level == RESET_ALL) {
11316 11315 if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11317 11316 rval = 1;
11318 11317 }
11319 11318 } else if (level == RESET_TARGET || level == RESET_LUN) {
11320 11319 /*
11321 11320 * If we are in the middle of discovery, return
11322 11321 * SUCCESS as this target will be rediscovered
11323 11322 * anyway
11324 11323 */
11325 11324 mutex_enter(&ptgt->tgt_mutex);
11326 11325 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11327 11326 mutex_exit(&ptgt->tgt_mutex);
11328 11327 return (1);
11329 11328 }
11330 11329 mutex_exit(&ptgt->tgt_mutex);
11331 11330
11332 11331 if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11333 11332 rval = 1;
11334 11333 }
11335 11334 }
11336 11335 return (rval);
11337 11336 }
11338 11337
11339 11338
/*
 * called by the framework to get a SCSI capability
 * (tran_getcap(9E): a get is fcp_commoncap with doset == 0, val unused)
 */
static int
fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	int ret;

	ret = fcp_commoncap(ap, cap, 0, whom, 0);
	return (ret);
}
11348 11347
11349 11348
/*
 * called by the framework to set a SCSI capability
 * (tran_setcap(9E): a set is fcp_commoncap with doset == 1)
 */
static int
fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	int ret;

	ret = fcp_commoncap(ap, cap, value, whom, 1);
	return (ret);
}
11358 11357
11359 11358 /*
11360 11359 * Function: fcp_pkt_setup
11361 11360 *
11362 11361 * Description: This function sets up the scsi_pkt structure passed by the
11363 11362 * caller. This function assumes fcp_pkt_constructor has been
11364 11363 * called previously for the packet passed by the caller. If
11365 11364 * successful this call will have the following results:
11366 11365 *
11367 11366 * - The resources needed that will be constant through out
11368 11367 * the whole transaction are allocated.
11369 11368 * - The fields that will be constant through out the whole
11370 11369 * transaction are initialized.
11371 11370 * - The scsi packet will be linked to the LUN structure
11372 11371 * addressed by the transaction.
11373 11372 *
11374 11373 * Argument:
11375 11374 * *pkt Pointer to a scsi_pkt structure.
11376 11375 * callback
11377 11376 * arg
11378 11377 *
11379 11378 * Return Value: 0 Success
11380 11379 * !0 Failure
11381 11380 *
11382 11381 * Context: Kernel context or interrupt context
11383 11382 */
11384 11383 /* ARGSUSED */
11385 11384 static int
11386 11385 fcp_pkt_setup(struct scsi_pkt *pkt,
11387 11386 int (*callback)(caddr_t arg),
11388 11387 caddr_t arg)
11389 11388 {
11390 11389 struct fcp_pkt *cmd;
11391 11390 struct fcp_port *pptr;
11392 11391 struct fcp_lun *plun;
11393 11392 struct fcp_tgt *ptgt;
11394 11393 int kf;
11395 11394 fc_packet_t *fpkt;
11396 11395 fc_frame_hdr_t *hp;
11397 11396
11398 11397 pptr = ADDR2FCP(&pkt->pkt_address);
11399 11398 plun = ADDR2LUN(&pkt->pkt_address);
11400 11399 ptgt = plun->lun_tgt;
11401 11400
11402 11401 cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11403 11402 fpkt = cmd->cmd_fp_pkt;
11404 11403
11405 11404 /*
11406 11405 * this request is for dma allocation only
11407 11406 */
11408 11407 /*
11409 11408 * First step of fcp_scsi_init_pkt: pkt allocation
11410 11409 * We determine if the caller is willing to wait for the
11411 11410 * resources.
11412 11411 */
11413 11412 kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11414 11413
11415 11414 /*
11416 11415 * Selective zeroing of the pkt.
11417 11416 */
11418 11417 cmd->cmd_back = NULL;
11419 11418 cmd->cmd_next = NULL;
11420 11419
11421 11420 /*
11422 11421 * Zero out fcp command
11423 11422 */
11424 11423 bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11425 11424
11426 11425 cmd->cmd_state = FCP_PKT_IDLE;
11427 11426
11428 11427 fpkt = cmd->cmd_fp_pkt;
11429 11428 fpkt->pkt_data_acc = NULL;
11430 11429
11431 11430 /*
11432 11431 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
11433 11432 * could be destroyed. We need fail pkt_setup.
11434 11433 */
11435 11434 if (pptr->port_state & FCP_STATE_OFFLINE) {
11436 11435 return (-1);
11437 11436 }
11438 11437
11439 11438 mutex_enter(&ptgt->tgt_mutex);
11440 11439 fpkt->pkt_pd = ptgt->tgt_pd_handle;
11441 11440
11442 11441 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11443 11442 != FC_SUCCESS) {
11444 11443 mutex_exit(&ptgt->tgt_mutex);
11445 11444 return (-1);
11446 11445 }
11447 11446
11448 11447 mutex_exit(&ptgt->tgt_mutex);
11449 11448
11450 11449 /* Fill in the Fabric Channel Header */
11451 11450 hp = &fpkt->pkt_cmd_fhdr;
11452 11451 hp->r_ctl = R_CTL_COMMAND;
11453 11452 hp->rsvd = 0;
11454 11453 hp->type = FC_TYPE_SCSI_FCP;
11455 11454 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11456 11455 hp->seq_id = 0;
11457 11456 hp->df_ctl = 0;
11458 11457 hp->seq_cnt = 0;
11459 11458 hp->ox_id = 0xffff;
11460 11459 hp->rx_id = 0xffff;
11461 11460 hp->ro = 0;
11462 11461
11463 11462 /*
11464 11463 * A doubly linked list (cmd_forw, cmd_back) is built
11465 11464 * out of every allocated packet on a per-lun basis
11466 11465 *
11467 11466 * The packets are maintained in the list so as to satisfy
11468 11467 * scsi_abort() requests. At present (which is unlikely to
11469 11468 * change in the future) nobody performs a real scsi_abort
11470 11469 * in the SCSI target drivers (as they don't keep the packets
11471 11470 * after doing scsi_transport - so they don't know how to
11472 11471 * abort a packet other than sending a NULL to abort all
11473 11472 * outstanding packets)
11474 11473 */
11475 11474 mutex_enter(&plun->lun_mutex);
11476 11475 if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11477 11476 plun->lun_pkt_head->cmd_back = cmd;
11478 11477 } else {
11479 11478 plun->lun_pkt_tail = cmd;
11480 11479 }
11481 11480 plun->lun_pkt_head = cmd;
11482 11481 mutex_exit(&plun->lun_mutex);
11483 11482 return (0);
11484 11483 }
11485 11484
/*
 * Function: fcp_pkt_teardown
 *
 * Description: This function releases a scsi_pkt structure and all the
 *		resources attached to it.  It is the inverse of
 *		fcp_pkt_setup: the packet is unlinked from the per-LUN
 *		list and the underlying fc packet is uninitialized.
 *
 * Argument:	*pkt		Pointer to a scsi_pkt structure.
 *
 * Return Value: None
 *
 * Context: User, Kernel or Interrupt context.
 */
static void
fcp_pkt_teardown(struct scsi_pkt *pkt)
{
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun *plun = ADDR2LUN(&pkt->pkt_address);
	struct fcp_pkt *cmd = (struct fcp_pkt *)pkt->pkt_ha_private;

	/*
	 * Remove the packet from the per-lun list (doubly linked via
	 * cmd_forw/cmd_back, built by fcp_pkt_setup).
	 */
	mutex_enter(&plun->lun_mutex);
	if (cmd->cmd_back) {
		/* has a predecessor: cannot be the head */
		ASSERT(cmd != plun->lun_pkt_head);
		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
	} else {
		/* no predecessor: must be the head */
		ASSERT(cmd == plun->lun_pkt_head);
		plun->lun_pkt_head = cmd->cmd_forw;
	}

	if (cmd->cmd_forw) {
		cmd->cmd_forw->cmd_back = cmd->cmd_back;
	} else {
		/* no successor: must be the tail */
		ASSERT(cmd == plun->lun_pkt_tail);
		plun->lun_pkt_tail = cmd->cmd_back;
	}

	mutex_exit(&plun->lun_mutex);

	/* release the fc packet resources acquired in fcp_pkt_setup */
	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
}
11528 11527
11529 11528 /*
11530 11529 * Routine for reset notification setup, to register or cancel.
11531 11530 * This function is called by SCSA
11532 11531 */
11533 11532 /*ARGSUSED*/
11534 11533 static int
11535 11534 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11536 11535 void (*callback)(caddr_t), caddr_t arg)
11537 11536 {
11538 11537 struct fcp_port *pptr = ADDR2FCP(ap);
11539 11538
11540 11539 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11541 11540 &pptr->port_mutex, &pptr->port_reset_notify_listf));
11542 11541 }
11543 11542
11544 11543
11545 11544 static int
11546 11545 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11547 11546 ddi_eventcookie_t *event_cookiep)
11548 11547 {
11549 11548 struct fcp_port *pptr = fcp_dip2port(dip);
11550 11549
11551 11550 if (pptr == NULL) {
11552 11551 return (DDI_FAILURE);
11553 11552 }
11554 11553
11555 11554 return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11556 11555 event_cookiep, NDI_EVENT_NOPASS));
11557 11556 }
11558 11557
11559 11558
11560 11559 static int
11561 11560 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11562 11561 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11563 11562 ddi_callback_id_t *cb_id)
11564 11563 {
11565 11564 struct fcp_port *pptr = fcp_dip2port(dip);
11566 11565
11567 11566 if (pptr == NULL) {
11568 11567 return (DDI_FAILURE);
11569 11568 }
11570 11569
11571 11570 return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11572 11571 eventid, callback, arg, NDI_SLEEP, cb_id));
11573 11572 }
11574 11573
11575 11574
11576 11575 static int
11577 11576 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11578 11577 {
11579 11578
11580 11579 struct fcp_port *pptr = fcp_dip2port(dip);
11581 11580
11582 11581 if (pptr == NULL) {
11583 11582 return (DDI_FAILURE);
11584 11583 }
11585 11584 return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11586 11585 }
11587 11586
11588 11587
11589 11588 /*
11590 11589 * called by the transport to post an event
11591 11590 */
11592 11591 static int
11593 11592 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11594 11593 ddi_eventcookie_t eventid, void *impldata)
11595 11594 {
11596 11595 struct fcp_port *pptr = fcp_dip2port(dip);
11597 11596
11598 11597 if (pptr == NULL) {
11599 11598 return (DDI_FAILURE);
11600 11599 }
11601 11600
11602 11601 return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11603 11602 eventid, impldata));
11604 11603 }
11605 11604
11606 11605
/*
 * A target in many cases in Fibre Channel has a one to one relation
 * with a port identifier (which is also known as D_ID and also as AL_PA
 * in private Loop) On Fibre Channel-to-SCSI bridge boxes a target reset
 * will most likely result in resetting all LUNs (which means a reset will
 * occur on all the SCSI devices connected at the other end of the bridge)
 * That is the latest favorite topic for discussion, for, one can debate as
 * hot as one likes and come up with arguably a best solution to one's
 * satisfaction
 *
 * To stay on track and not digress much, here are the problems stated
 * briefly:
 *
 *	SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
 *	target drivers use RESET_TARGET even if their instance is on a
 *	LUN. Doesn't that sound a bit broken ?
 *
 *	FCP SCSI (the current spec) only defines RESET TARGET in the
 *	control fields of an FCP_CMND structure. It should have been
 *	fixed right there, giving flexibility to the initiators to
 *	minimize havoc that could be caused by resetting a target.
 *
 * Sends a polled FCP task-management reset (target or LUN, per level)
 * and, on success, queues an fcp_reset_elem so the watchdog later
 * clears the BUSY state after FCP_RESET_DELAY.  Returns FC_SUCCESS or
 * FC_FAILURE.
 */
static int
fcp_reset_target(struct scsi_address *ap, int level)
{
	int rval = FC_FAILURE;
	char lun_id[25];
	struct fcp_port *pptr = ADDR2FCP(ap);
	struct fcp_lun *plun = ADDR2LUN(ap);
	struct fcp_tgt *ptgt = plun->lun_tgt;
	struct scsi_pkt *pkt;
	struct fcp_pkt *cmd;
	struct fcp_rsp *rsp;
	uint32_t tgt_cnt;
	struct fcp_rsp_info *rsp_info;
	struct fcp_reset_elem *p;
	int bval;

	/* allocate the delayed-cleanup element up front; fail if we can't */
	if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
	    KM_NOSLEEP)) == NULL) {
		return (rval);
	}

	/* mark the target (or just the LUN) busy for the duration */
	mutex_enter(&ptgt->tgt_mutex);
	if (level == RESET_TARGET) {
		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
			mutex_exit(&ptgt->tgt_mutex);
			kmem_free(p, sizeof (struct fcp_reset_elem));
			return (rval);
		}
		fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
		(void) strcpy(lun_id, " ");
	} else {
		if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
			mutex_exit(&ptgt->tgt_mutex);
			kmem_free(p, sizeof (struct fcp_reset_elem));
			return (rval);
		}
		fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);

		/* lun_id is only used to decorate the log messages below */
		(void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
	}
	tgt_cnt = ptgt->tgt_change_cnt;

	mutex_exit(&ptgt->tgt_mutex);

	if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
	    0, 0, NULL, 0)) == NULL) {
		kmem_free(p, sizeof (struct fcp_reset_elem));
		mutex_enter(&ptgt->tgt_mutex);
		/*
		 * NOTE(review): on allocation failure the busy flag is
		 * rolled back on the whole target even when only the LUN
		 * was marked busy above (RESET_LUN case) — confirm whether
		 * this asymmetry is intentional.
		 */
		fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
		mutex_exit(&ptgt->tgt_mutex);
		return (rval);
	}
	pkt->pkt_time = FCP_POLL_TIMEOUT;

	/* fill in cmd part of packet */
	cmd = PKT2CMD(pkt);
	if (level == RESET_TARGET) {
		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
	} else {
		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
	}
	cmd->cmd_fp_pkt->pkt_comp = NULL;
	cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;

	/* prepare a packet for transport */
	fcp_prepare_pkt(pptr, cmd, plun);

	if (cmd->cmd_pkt->pkt_time) {
		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
	} else {
		/* default timeout (presumably seconds; same as scsi_start) */
		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
	}

	/* issue the reset synchronously (polled, port held busy) */
	(void) fc_ulp_busy_port(pptr->port_fp_handle);
	bval = fcp_dopoll(pptr, cmd);
	fc_ulp_idle_port(pptr->port_fp_handle);

	/* submit the packet */
	if (bval == TRAN_ACCEPT) {
		/* error: 0=ok, 1=bad rsp_code, 2=bad rsp, 3=no rsp at all */
		int error = 3;

		rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
		rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
		    sizeof (struct fcp_rsp));

		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			if (fcp_validate_fcp_response(rsp, pptr) ==
			    FC_SUCCESS) {
				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
					FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
					    sizeof (struct fcp_rsp), rsp_info,
					    cmd->cmd_fp_pkt->pkt_resp_acc,
					    sizeof (struct fcp_rsp_info));
				}
				if (rsp_info->rsp_code == FCP_NO_FAILURE) {
					rval = FC_SUCCESS;
					error = 0;
				} else {
					error = 1;
				}
			} else {
				error = 2;
			}
		}

		switch (error) {
		case 0:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FCP: WWN 0x%08x%08x %s reset successfully",
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
			break;

		case 1:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FCP: Reset to WWN 0x%08x%08x %s failed,"
			    " response code=%x",
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
			    rsp_info->rsp_code);
			break;

		case 2:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FCP: Reset to WWN 0x%08x%08x %s failed,"
			    " Bad FCP response values: rsvd1=%x,"
			    " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
			    " rsplen=%x, senselen=%x",
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
			    rsp->reserved_0, rsp->reserved_1,
			    rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FCP: Reset to WWN 0x%08x%08x %s failed",
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
			break;
		}
	}
	scsi_destroy_pkt(pkt);

	if (rval == FC_FAILURE) {
		/* reset failed: roll the BUSY flag back and free the elem */
		mutex_enter(&ptgt->tgt_mutex);
		if (level == RESET_TARGET) {
			fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
		} else {
			fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
		}
		mutex_exit(&ptgt->tgt_mutex);
		kmem_free(p, sizeof (struct fcp_reset_elem));
		return (rval);
	}

	/* success: queue the element; the watchdog clears BUSY later */
	mutex_enter(&pptr->port_mutex);
	if (level == RESET_TARGET) {
		p->tgt = ptgt;
		p->lun = NULL;
	} else {
		p->tgt = NULL;
		p->lun = plun;
	}
	/*
	 * NOTE(review): this unconditionally overwrites the NULL stored
	 * in the RESET_LUN branch just above, so p->tgt is always ptgt
	 * and the else-branch assignment is dead — confirm whether this
	 * is intentional before changing it.
	 */
	p->tgt = ptgt;
	p->tgt_cnt = tgt_cnt;
	p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
	p->next = pptr->port_reset_list;
	pptr->port_reset_list = p;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "Notify ssd of the reset to reinstate the reservations");

	scsi_hba_reset_notify_callback(&pptr->port_mutex,
	    &pptr->port_reset_notify_listf);

	mutex_exit(&pptr->port_mutex);

	return (rval);
}
11812 11811
11813 11812
11814 11813 /*
11815 11814 * called by fcp_getcap and fcp_setcap to get and set (respectively)
11816 11815 * SCSI capabilities
11817 11816 */
11818 11817 /* ARGSUSED */
11819 11818 static int
11820 11819 fcp_commoncap(struct scsi_address *ap, char *cap,
11821 11820 int val, int tgtonly, int doset)
11822 11821 {
11823 11822 struct fcp_port *pptr = ADDR2FCP(ap);
11824 11823 struct fcp_lun *plun = ADDR2LUN(ap);
11825 11824 struct fcp_tgt *ptgt = plun->lun_tgt;
11826 11825 int cidx;
11827 11826 int rval = FALSE;
11828 11827
11829 11828 if (cap == (char *)0) {
11830 11829 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11831 11830 fcp_trace, FCP_BUF_LEVEL_3, 0,
11832 11831 "fcp_commoncap: invalid arg");
11833 11832 return (rval);
11834 11833 }
11835 11834
11836 11835 if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11837 11836 return (UNDEFINED);
11838 11837 }
11839 11838
11840 11839 /*
11841 11840 * Process setcap request.
11842 11841 */
11843 11842 if (doset) {
11844 11843 /*
11845 11844 * At present, we can only set binary (0/1) values
11846 11845 */
11847 11846 switch (cidx) {
11848 11847 case SCSI_CAP_ARQ:
11849 11848 if (val == 0) {
11850 11849 rval = FALSE;
11851 11850 } else {
11852 11851 rval = TRUE;
11853 11852 }
11854 11853 break;
11855 11854
11856 11855 case SCSI_CAP_LUN_RESET:
11857 11856 if (val) {
11858 11857 plun->lun_cap |= FCP_LUN_CAP_RESET;
11859 11858 } else {
11860 11859 plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11861 11860 }
11862 11861 rval = TRUE;
11863 11862 break;
11864 11863
11865 11864 case SCSI_CAP_SECTOR_SIZE:
11866 11865 rval = TRUE;
11867 11866 break;
11868 11867 default:
11869 11868 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11870 11869 fcp_trace, FCP_BUF_LEVEL_4, 0,
11871 11870 "fcp_setcap: unsupported %d", cidx);
11872 11871 rval = UNDEFINED;
11873 11872 break;
11874 11873 }
11875 11874
11876 11875 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11877 11876 fcp_trace, FCP_BUF_LEVEL_5, 0,
11878 11877 "set cap: cap=%s, val/tgtonly/doset/rval = "
11879 11878 "0x%x/0x%x/0x%x/%d",
11880 11879 cap, val, tgtonly, doset, rval);
11881 11880
11882 11881 } else {
11883 11882 /*
11884 11883 * Process getcap request.
11885 11884 */
11886 11885 switch (cidx) {
11887 11886 case SCSI_CAP_DMA_MAX:
11888 11887 rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11889 11888
11890 11889 /*
11891 11890 * Need to make an adjustment qlc is uint_t 64
11892 11891 * st is int, so we will make the adjustment here
11893 11892 * being as nobody wants to touch this.
11894 11893 * It still leaves the max single block length
11895 11894 * of 2 gig. This should last .
11896 11895 */
11897 11896
11898 11897 if (rval == -1) {
11899 11898 rval = MAX_INT_DMA;
11900 11899 }
11901 11900
11902 11901 break;
11903 11902
11904 11903 case SCSI_CAP_INITIATOR_ID:
11905 11904 rval = pptr->port_id;
11906 11905 break;
11907 11906
11908 11907 case SCSI_CAP_ARQ:
11909 11908 case SCSI_CAP_RESET_NOTIFICATION:
11910 11909 case SCSI_CAP_TAGGED_QING:
11911 11910 rval = TRUE;
11912 11911 break;
11913 11912
11914 11913 case SCSI_CAP_SCSI_VERSION:
11915 11914 rval = 3;
11916 11915 break;
11917 11916
11918 11917 case SCSI_CAP_INTERCONNECT_TYPE:
11919 11918 if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11920 11919 (ptgt->tgt_hard_addr == 0)) {
11921 11920 rval = INTERCONNECT_FABRIC;
11922 11921 } else {
11923 11922 rval = INTERCONNECT_FIBRE;
11924 11923 }
11925 11924 break;
11926 11925
11927 11926 case SCSI_CAP_LUN_RESET:
11928 11927 rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11929 11928 TRUE : FALSE;
11930 11929 break;
11931 11930
11932 11931 default:
11933 11932 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11934 11933 fcp_trace, FCP_BUF_LEVEL_4, 0,
11935 11934 "fcp_getcap: unsupported %d", cidx);
11936 11935 rval = UNDEFINED;
11937 11936 break;
11938 11937 }
11939 11938
11940 11939 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11941 11940 fcp_trace, FCP_BUF_LEVEL_8, 0,
11942 11941 "get cap: cap=%s, val/tgtonly/doset/rval = "
11943 11942 "0x%x/0x%x/0x%x/%d",
11944 11943 cap, val, tgtonly, doset, rval);
11945 11944 }
11946 11945
11947 11946 return (rval);
11948 11947 }
11949 11948
11950 11949 /*
11951 11950 * called by the transport to get the port-wwn and lun
11952 11951 * properties of this device, and to create a "name" based on them
11953 11952 *
11954 11953 * these properties don't exist on sun4m
11955 11954 *
11956 11955 * return 1 for success else return 0
11957 11956 */
11958 11957 /* ARGSUSED */
11959 11958 static int
11960 11959 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11961 11960 {
11962 11961 int i;
11963 11962 int *lun;
11964 11963 int numChars;
11965 11964 uint_t nlun;
11966 11965 uint_t count;
11967 11966 uint_t nbytes;
11968 11967 uchar_t *bytes;
11969 11968 uint16_t lun_num;
11970 11969 uint32_t tgt_id;
11971 11970 char **conf_wwn;
11972 11971 char tbuf[(FC_WWN_SIZE << 1) + 1];
11973 11972 uchar_t barray[FC_WWN_SIZE];
11974 11973 dev_info_t *tgt_dip;
11975 11974 struct fcp_tgt *ptgt;
11976 11975 struct fcp_port *pptr;
11977 11976 struct fcp_lun *plun;
11978 11977
11979 11978 ASSERT(sd != NULL);
11980 11979 ASSERT(name != NULL);
11981 11980
11982 11981 tgt_dip = sd->sd_dev;
11983 11982 pptr = ddi_get_soft_state(fcp_softstate,
11984 11983 ddi_get_instance(ddi_get_parent(tgt_dip)));
11985 11984 if (pptr == NULL) {
11986 11985 return (0);
11987 11986 }
11988 11987
11989 11988 ASSERT(tgt_dip != NULL);
11990 11989
11991 11990 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11992 11991 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11993 11992 LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11994 11993 name[0] = '\0';
11995 11994 return (0);
11996 11995 }
11997 11996
11998 11997 if (nlun == 0) {
11999 11998 ddi_prop_free(lun);
12000 11999 return (0);
12001 12000 }
12002 12001
12003 12002 lun_num = lun[0];
12004 12003 ddi_prop_free(lun);
12005 12004
12006 12005 /*
12007 12006 * Lookup for .conf WWN property
12008 12007 */
12009 12008 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
12010 12009 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
12011 12010 &conf_wwn, &count) == DDI_PROP_SUCCESS) {
12012 12011 ASSERT(count >= 1);
12013 12012
12014 12013 fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
12015 12014 ddi_prop_free(conf_wwn);
12016 12015 mutex_enter(&pptr->port_mutex);
12017 12016 if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
12018 12017 mutex_exit(&pptr->port_mutex);
12019 12018 return (0);
12020 12019 }
12021 12020 ptgt = plun->lun_tgt;
12022 12021 mutex_exit(&pptr->port_mutex);
12023 12022
12024 12023 (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
12025 12024 tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
12026 12025
12027 12026 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12028 12027 ptgt->tgt_hard_addr != 0) {
12029 12028 tgt_id = (uint32_t)fcp_alpa_to_switch[
12030 12029 ptgt->tgt_hard_addr];
12031 12030 } else {
12032 12031 tgt_id = ptgt->tgt_d_id;
12033 12032 }
12034 12033
12035 12034 (void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
12036 12035 TARGET_PROP, tgt_id);
12037 12036 }
12038 12037
12039 12038 /* get the our port-wwn property */
12040 12039 bytes = NULL;
12041 12040 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
12042 12041 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12043 12042 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
12044 12043 if (bytes != NULL) {
12045 12044 ddi_prop_free(bytes);
12046 12045 }
12047 12046 return (0);
12048 12047 }
12049 12048
12050 12049 for (i = 0; i < FC_WWN_SIZE; i++) {
12051 12050 (void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12052 12051 }
12053 12052
12054 12053 /* Stick in the address of the form "wWWN,LUN" */
12055 12054 numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12056 12055
12057 12056 ASSERT(numChars < len);
12058 12057 if (numChars >= len) {
12059 12058 fcp_log(CE_WARN, pptr->port_dip,
12060 12059 "!fcp_scsi_get_name: "
12061 12060 "name parameter length too small, it needs to be %d",
12062 12061 numChars+1);
12063 12062 }
12064 12063
12065 12064 ddi_prop_free(bytes);
12066 12065
12067 12066 return (1);
12068 12067 }
12069 12068
12070 12069
12071 12070 /*
12072 12071 * called by the transport to get the SCSI target id value, returning
12073 12072 * it in "name"
12074 12073 *
12075 12074 * this isn't needed/used on sun4m
12076 12075 *
12077 12076 * return 1 for success else return 0
12078 12077 */
12079 12078 /* ARGSUSED */
12080 12079 static int
12081 12080 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12082 12081 {
12083 12082 struct fcp_lun *plun = ADDR2LUN(&sd->sd_address);
12084 12083 struct fcp_tgt *ptgt;
12085 12084 int numChars;
12086 12085
12087 12086 if (plun == NULL) {
12088 12087 return (0);
12089 12088 }
12090 12089
12091 12090 if ((ptgt = plun->lun_tgt) == NULL) {
12092 12091 return (0);
12093 12092 }
12094 12093
12095 12094 numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12096 12095
12097 12096 ASSERT(numChars < len);
12098 12097 if (numChars >= len) {
12099 12098 fcp_log(CE_WARN, NULL,
12100 12099 "!fcp_scsi_get_bus_addr: "
12101 12100 "name parameter length too small, it needs to be %d",
12102 12101 numChars+1);
12103 12102 }
12104 12103
12105 12104 return (1);
12106 12105 }
12107 12106
12108 12107
12109 12108 /*
12110 12109 * called internally to reset the link where the specified port lives
12111 12110 */
12112 12111 static int
12113 12112 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
12114 12113 {
12115 12114 la_wwn_t wwn;
12116 12115 struct fcp_lun *plun;
12117 12116 struct fcp_tgt *ptgt;
12118 12117
12119 12118 /* disable restart of lip if we're suspended */
12120 12119 mutex_enter(&pptr->port_mutex);
12121 12120
12122 12121 if (pptr->port_state & (FCP_STATE_SUSPENDED |
12123 12122 FCP_STATE_POWER_DOWN)) {
12124 12123 mutex_exit(&pptr->port_mutex);
12125 12124 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12126 12125 fcp_trace, FCP_BUF_LEVEL_2, 0,
12127 12126 "fcp_linkreset, fcp%d: link reset "
12128 12127 "disabled due to DDI_SUSPEND",
12129 12128 ddi_get_instance(pptr->port_dip));
12130 12129 return (FC_FAILURE);
12131 12130 }
12132 12131
12133 12132 if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
12134 12133 mutex_exit(&pptr->port_mutex);
12135 12134 return (FC_SUCCESS);
12136 12135 }
12137 12136
12138 12137 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
12139 12138 fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
12140 12139
12141 12140 /*
12142 12141 * If ap == NULL assume local link reset.
12143 12142 */
12144 12143 if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
12145 12144 plun = ADDR2LUN(ap);
12146 12145 ptgt = plun->lun_tgt;
12147 12146 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
12148 12147 } else {
12149 12148 bzero((caddr_t)&wwn, sizeof (wwn));
12150 12149 }
12151 12150 mutex_exit(&pptr->port_mutex);
12152 12151
12153 12152 return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
12154 12153 }
12155 12154
12156 12155
12157 12156 /*
12158 12157 * called from fcp_port_attach() to resume a port
12159 12158 * return DDI_* success/failure status
12160 12159 * acquires and releases the global mutex
12161 12160 * acquires and releases the port mutex
12162 12161 */
12163 12162 /*ARGSUSED*/
12164 12163
12165 12164 static int
12166 12165 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
12167 12166 uint32_t s_id, fc_attach_cmd_t cmd, int instance)
12168 12167 {
12169 12168 int res = DDI_FAILURE; /* default result */
12170 12169 struct fcp_port *pptr; /* port state ptr */
12171 12170 uint32_t alloc_cnt;
12172 12171 uint32_t max_cnt;
12173 12172 fc_portmap_t *tmp_list = NULL;
12174 12173
12175 12174 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
12176 12175 FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
12177 12176 instance);
12178 12177
12179 12178 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
12180 12179 cmn_err(CE_WARN, "fcp: bad soft state");
12181 12180 return (res);
12182 12181 }
12183 12182
12184 12183 mutex_enter(&pptr->port_mutex);
12185 12184 switch (cmd) {
12186 12185 case FC_CMD_RESUME:
12187 12186 ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
12188 12187 pptr->port_state &= ~FCP_STATE_SUSPENDED;
12189 12188 break;
12190 12189
12191 12190 case FC_CMD_POWER_UP:
12192 12191 /*
12193 12192 * If the port is DDI_SUSPENded, defer rediscovery
12194 12193 * until DDI_RESUME occurs
12195 12194 */
12196 12195 if (pptr->port_state & FCP_STATE_SUSPENDED) {
12197 12196 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12198 12197 mutex_exit(&pptr->port_mutex);
12199 12198 return (DDI_SUCCESS);
12200 12199 }
12201 12200 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12202 12201 }
12203 12202 pptr->port_id = s_id;
12204 12203 pptr->port_state = FCP_STATE_INIT;
↓ open down ↓ |
1893 lines elided |
↑ open up ↑ |
12205 12204 mutex_exit(&pptr->port_mutex);
12206 12205
12207 12206 /*
12208 12207 * Make a copy of ulp_port_info as fctl allocates
12209 12208 * a temp struct.
12210 12209 */
12211 12210 (void) fcp_cp_pinfo(pptr, pinfo);
12212 12211
12213 12212 mutex_enter(&fcp_global_mutex);
12214 12213 if (fcp_watchdog_init++ == 0) {
12215 - fcp_watchdog_tick = fcp_watchdog_timeout *
12216 - drv_usectohz(1000000);
12214 + fcp_watchdog_tick = drv_sectohz(fcp_watchdog_timeout);
12217 12215 fcp_watchdog_id = timeout(fcp_watch,
12218 12216 NULL, fcp_watchdog_tick);
12219 12217 }
12220 12218 mutex_exit(&fcp_global_mutex);
12221 12219
12222 12220 /*
12223 12221 * Handle various topologies and link states.
12224 12222 */
12225 12223 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12226 12224 case FC_STATE_OFFLINE:
12227 12225 /*
12228 12226 * Wait for ONLINE, at which time a state
12229 12227 * change will cause a statec_callback
12230 12228 */
12231 12229 res = DDI_SUCCESS;
12232 12230 break;
12233 12231
12234 12232 case FC_STATE_ONLINE:
12235 12233
12236 12234 if (pptr->port_topology == FC_TOP_UNKNOWN) {
12237 12235 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12238 12236 res = DDI_SUCCESS;
12239 12237 break;
12240 12238 }
12241 12239
12242 12240 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12243 12241 !fcp_enable_auto_configuration) {
12244 12242 tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12245 12243 if (tmp_list == NULL) {
12246 12244 if (!alloc_cnt) {
12247 12245 res = DDI_SUCCESS;
12248 12246 }
12249 12247 break;
12250 12248 }
12251 12249 max_cnt = alloc_cnt;
12252 12250 } else {
12253 12251 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12254 12252
12255 12253 alloc_cnt = FCP_MAX_DEVICES;
12256 12254
12257 12255 if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12258 12256 (sizeof (fc_portmap_t)) * alloc_cnt,
12259 12257 KM_NOSLEEP)) == NULL) {
12260 12258 fcp_log(CE_WARN, pptr->port_dip,
12261 12259 "!fcp%d: failed to allocate portmap",
12262 12260 instance);
12263 12261 break;
12264 12262 }
12265 12263
12266 12264 max_cnt = alloc_cnt;
12267 12265 if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12268 12266 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12269 12267 FC_SUCCESS) {
12270 12268 caddr_t msg;
12271 12269
12272 12270 (void) fc_ulp_error(res, &msg);
12273 12271
12274 12272 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12275 12273 fcp_trace, FCP_BUF_LEVEL_2, 0,
12276 12274 "resume failed getportmap: reason=0x%x",
12277 12275 res);
12278 12276
12279 12277 fcp_log(CE_WARN, pptr->port_dip,
12280 12278 "!failed to get port map : %s", msg);
12281 12279 break;
12282 12280 }
12283 12281 if (max_cnt > alloc_cnt) {
12284 12282 alloc_cnt = max_cnt;
12285 12283 }
12286 12284 }
12287 12285
12288 12286 /*
12289 12287 * do the SCSI device discovery and create
12290 12288 * the devinfos
12291 12289 */
12292 12290 fcp_statec_callback(ulph, pptr->port_fp_handle,
12293 12291 pptr->port_phys_state, pptr->port_topology, tmp_list,
12294 12292 max_cnt, pptr->port_id);
12295 12293
12296 12294 res = DDI_SUCCESS;
12297 12295 break;
12298 12296
12299 12297 default:
12300 12298 fcp_log(CE_WARN, pptr->port_dip,
12301 12299 "!fcp%d: invalid port state at attach=0x%x",
12302 12300 instance, pptr->port_phys_state);
12303 12301
12304 12302 mutex_enter(&pptr->port_mutex);
12305 12303 pptr->port_phys_state = FCP_STATE_OFFLINE;
12306 12304 mutex_exit(&pptr->port_mutex);
12307 12305 res = DDI_SUCCESS;
12308 12306
12309 12307 break;
12310 12308 }
12311 12309
12312 12310 if (tmp_list != NULL) {
12313 12311 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12314 12312 }
12315 12313
12316 12314 return (res);
12317 12315 }
12318 12316
12319 12317
12320 12318 static void
12321 12319 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12322 12320 {
12323 12321 pptr->port_fp_modlinkage = *pinfo->port_linkage;
12324 12322 pptr->port_dip = pinfo->port_dip;
12325 12323 pptr->port_fp_handle = pinfo->port_handle;
12326 12324 if (pinfo->port_acc_attr != NULL) {
12327 12325 /*
12328 12326 * FCA supports DMA
12329 12327 */
12330 12328 pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12331 12329 pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12332 12330 pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12333 12331 pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12334 12332 }
12335 12333 pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12336 12334 pptr->port_max_exch = pinfo->port_fca_max_exch;
12337 12335 pptr->port_phys_state = pinfo->port_state;
12338 12336 pptr->port_topology = pinfo->port_flags;
12339 12337 pptr->port_reset_action = pinfo->port_reset_action;
12340 12338 pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12341 12339 pptr->port_fcp_dma = pinfo->port_fcp_dma;
12342 12340 bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12343 12341 bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12344 12342
12345 12343 /* Clear FMA caps to avoid fm-capability ereport */
12346 12344 if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12347 12345 pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12348 12346 if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12349 12347 pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12350 12348 if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12351 12349 pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12352 12350 }
12353 12351
12354 12352 /*
12355 12353 * If the elements wait field is set to 1 then
12356 12354 * another thread is waiting for the operation to complete. Once
12357 12355 * it is complete, the waiting thread is signaled and the element is
12358 12356 * freed by the waiting thread. If the elements wait field is set to 0
12359 12357 * the element is freed.
12360 12358 */
12361 12359 static void
12362 12360 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12363 12361 {
12364 12362 ASSERT(elem != NULL);
12365 12363 mutex_enter(&elem->mutex);
12366 12364 elem->result = result;
12367 12365 if (elem->wait) {
12368 12366 elem->wait = 0;
12369 12367 cv_signal(&elem->cv);
12370 12368 mutex_exit(&elem->mutex);
12371 12369 } else {
12372 12370 mutex_exit(&elem->mutex);
12373 12371 cv_destroy(&elem->cv);
12374 12372 mutex_destroy(&elem->mutex);
12375 12373 kmem_free(elem, sizeof (struct fcp_hp_elem));
12376 12374 }
12377 12375 }
12378 12376
12379 12377 /*
12380 12378 * This function is invoked from the taskq thread to allocate
12381 12379 * devinfo nodes and to online/offline them.
12382 12380 */
12383 12381 static void
12384 12382 fcp_hp_task(void *arg)
12385 12383 {
12386 12384 struct fcp_hp_elem *elem = (struct fcp_hp_elem *)arg;
12387 12385 struct fcp_lun *plun = elem->lun;
12388 12386 struct fcp_port *pptr = elem->port;
12389 12387 int result;
12390 12388
12391 12389 ASSERT(elem->what == FCP_ONLINE ||
12392 12390 elem->what == FCP_OFFLINE ||
12393 12391 elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12394 12392 elem->what == FCP_MPXIO_PATH_SET_BUSY);
12395 12393
12396 12394 mutex_enter(&pptr->port_mutex);
12397 12395 mutex_enter(&plun->lun_mutex);
12398 12396 if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12399 12397 plun->lun_event_count != elem->event_cnt) ||
12400 12398 pptr->port_state & (FCP_STATE_SUSPENDED |
12401 12399 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12402 12400 mutex_exit(&plun->lun_mutex);
12403 12401 mutex_exit(&pptr->port_mutex);
12404 12402 fcp_process_elem(elem, NDI_FAILURE);
12405 12403 return;
12406 12404 }
12407 12405 mutex_exit(&plun->lun_mutex);
12408 12406 mutex_exit(&pptr->port_mutex);
12409 12407
12410 12408 result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12411 12409 elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12412 12410 fcp_process_elem(elem, result);
12413 12411 }
12414 12412
12415 12413
12416 12414 static child_info_t *
12417 12415 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12418 12416 int tcount)
12419 12417 {
12420 12418 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12421 12419
12422 12420 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12423 12421 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12424 12422
12425 12423 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12426 12424 /*
12427 12425 * Child has not been created yet. Create the child device
12428 12426 * based on the per-Lun flags.
12429 12427 */
12430 12428 if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12431 12429 plun->lun_cip =
12432 12430 CIP(fcp_create_dip(plun, lcount, tcount));
12433 12431 plun->lun_mpxio = 0;
12434 12432 } else {
12435 12433 plun->lun_cip =
12436 12434 CIP(fcp_create_pip(plun, lcount, tcount));
12437 12435 plun->lun_mpxio = 1;
12438 12436 }
12439 12437 } else {
12440 12438 plun->lun_cip = cip;
12441 12439 }
12442 12440
12443 12441 return (plun->lun_cip);
12444 12442 }
12445 12443
12446 12444
12447 12445 static int
12448 12446 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12449 12447 {
12450 12448 int rval = FC_FAILURE;
12451 12449 dev_info_t *pdip;
12452 12450 struct dev_info *dip;
12453 12451 int circular;
12454 12452
12455 12453 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12456 12454
12457 12455 pdip = plun->lun_tgt->tgt_port->port_dip;
12458 12456
12459 12457 if (plun->lun_cip == NULL) {
12460 12458 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12461 12459 fcp_trace, FCP_BUF_LEVEL_3, 0,
12462 12460 "fcp_is_dip_present: plun->lun_cip is NULL: "
12463 12461 "plun: %p lun state: %x num: %d target state: %x",
12464 12462 plun, plun->lun_state, plun->lun_num,
12465 12463 plun->lun_tgt->tgt_port->port_state);
12466 12464 return (rval);
12467 12465 }
12468 12466 ndi_devi_enter(pdip, &circular);
12469 12467 dip = DEVI(pdip)->devi_child;
12470 12468 while (dip) {
12471 12469 if (dip == DEVI(cdip)) {
12472 12470 rval = FC_SUCCESS;
12473 12471 break;
12474 12472 }
12475 12473 dip = dip->devi_sibling;
12476 12474 }
12477 12475 ndi_devi_exit(pdip, circular);
12478 12476 return (rval);
12479 12477 }
12480 12478
12481 12479 static int
12482 12480 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12483 12481 {
12484 12482 int rval = FC_FAILURE;
12485 12483
12486 12484 ASSERT(plun != NULL);
12487 12485 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12488 12486
12489 12487 if (plun->lun_mpxio == 0) {
12490 12488 rval = fcp_is_dip_present(plun, DIP(cip));
12491 12489 } else {
12492 12490 rval = fcp_is_pip_present(plun, PIP(cip));
12493 12491 }
12494 12492
12495 12493 return (rval);
12496 12494 }
12497 12495
12498 12496 /*
12499 12497 * Function: fcp_create_dip
12500 12498 *
12501 12499 * Description: Creates a dev_info_t structure for the LUN specified by the
12502 12500 * caller.
12503 12501 *
12504 12502 * Argument: plun Lun structure
12505 12503 * link_cnt Link state count.
12506 12504 * tgt_cnt Target state change count.
12507 12505 *
12508 12506 * Return Value: NULL if it failed
12509 12507 * dev_info_t structure address if it succeeded
12510 12508 *
12511 12509 * Context: Kernel context
12512 12510 */
12513 12511 static dev_info_t *
12514 12512 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12515 12513 {
12516 12514 int failure = 0;
12517 12515 uint32_t tgt_id;
12518 12516 uint64_t sam_lun;
12519 12517 struct fcp_tgt *ptgt = plun->lun_tgt;
12520 12518 struct fcp_port *pptr = ptgt->tgt_port;
12521 12519 dev_info_t *pdip = pptr->port_dip;
12522 12520 dev_info_t *cdip = NULL;
12523 12521 dev_info_t *old_dip = DIP(plun->lun_cip);
12524 12522 char *nname = NULL;
12525 12523 char **compatible = NULL;
12526 12524 int ncompatible;
12527 12525 char *scsi_binding_set;
12528 12526 char t_pwwn[17];
12529 12527
12530 12528 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12531 12529 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12532 12530
12533 12531 /* get the 'scsi-binding-set' property */
12534 12532 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12535 12533 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12536 12534 &scsi_binding_set) != DDI_PROP_SUCCESS) {
12537 12535 scsi_binding_set = NULL;
12538 12536 }
12539 12537
12540 12538 /* determine the node name and compatible */
12541 12539 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12542 12540 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12543 12541 if (scsi_binding_set) {
12544 12542 ddi_prop_free(scsi_binding_set);
12545 12543 }
12546 12544
12547 12545 if (nname == NULL) {
12548 12546 #ifdef DEBUG
12549 12547 cmn_err(CE_WARN, "%s%d: no driver for "
12550 12548 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12551 12549 " compatible: %s",
12552 12550 ddi_driver_name(pdip), ddi_get_instance(pdip),
12553 12551 ptgt->tgt_port_wwn.raw_wwn[0],
12554 12552 ptgt->tgt_port_wwn.raw_wwn[1],
12555 12553 ptgt->tgt_port_wwn.raw_wwn[2],
12556 12554 ptgt->tgt_port_wwn.raw_wwn[3],
12557 12555 ptgt->tgt_port_wwn.raw_wwn[4],
12558 12556 ptgt->tgt_port_wwn.raw_wwn[5],
12559 12557 ptgt->tgt_port_wwn.raw_wwn[6],
12560 12558 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12561 12559 *compatible);
12562 12560 #endif /* DEBUG */
12563 12561 failure++;
12564 12562 goto end_of_fcp_create_dip;
12565 12563 }
12566 12564
12567 12565 cdip = fcp_find_existing_dip(plun, pdip, nname);
12568 12566
12569 12567 /*
12570 12568 * if the old_dip does not match the cdip, that means there is
12571 12569 * some property change. since we'll be using the cdip, we need
12572 12570 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12573 12571 * then the dtype for the device has been updated. Offline the
12574 12572 * the old device and create a new device with the new device type
12575 12573 * Refer to bug: 4764752
12576 12574 */
12577 12575 if (old_dip && (cdip != old_dip ||
12578 12576 plun->lun_state & FCP_LUN_CHANGED)) {
12579 12577 plun->lun_state &= ~(FCP_LUN_INIT);
12580 12578 mutex_exit(&plun->lun_mutex);
12581 12579 mutex_exit(&pptr->port_mutex);
12582 12580
12583 12581 mutex_enter(&ptgt->tgt_mutex);
12584 12582 (void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12585 12583 link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12586 12584 mutex_exit(&ptgt->tgt_mutex);
12587 12585
12588 12586 #ifdef DEBUG
12589 12587 if (cdip != NULL) {
12590 12588 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12591 12589 fcp_trace, FCP_BUF_LEVEL_2, 0,
12592 12590 "Old dip=%p; New dip=%p don't match", old_dip,
12593 12591 cdip);
12594 12592 } else {
12595 12593 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12596 12594 fcp_trace, FCP_BUF_LEVEL_2, 0,
12597 12595 "Old dip=%p; New dip=NULL don't match", old_dip);
12598 12596 }
12599 12597 #endif
12600 12598
12601 12599 mutex_enter(&pptr->port_mutex);
12602 12600 mutex_enter(&plun->lun_mutex);
12603 12601 }
12604 12602
12605 12603 if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12606 12604 plun->lun_state &= ~(FCP_LUN_CHANGED);
12607 12605 if (ndi_devi_alloc(pptr->port_dip, nname,
12608 12606 DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12609 12607 failure++;
12610 12608 goto end_of_fcp_create_dip;
12611 12609 }
12612 12610 }
12613 12611
12614 12612 /*
12615 12613 * Previously all the properties for the devinfo were destroyed here
12616 12614 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12617 12615 * the devid property (and other properties established by the target
12618 12616 * driver or framework) which the code does not always recreate, this
12619 12617 * call was removed.
12620 12618 * This opens a theoretical possibility that we may return with a
12621 12619 * stale devid on the node if the scsi entity behind the fibre channel
12622 12620 * lun has changed.
12623 12621 */
12624 12622
12625 12623 /* decorate the node with compatible */
12626 12624 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12627 12625 "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12628 12626 failure++;
12629 12627 goto end_of_fcp_create_dip;
12630 12628 }
12631 12629
12632 12630 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12633 12631 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12634 12632 failure++;
12635 12633 goto end_of_fcp_create_dip;
12636 12634 }
12637 12635
12638 12636 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12639 12637 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12640 12638 failure++;
12641 12639 goto end_of_fcp_create_dip;
12642 12640 }
12643 12641
12644 12642 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12645 12643 t_pwwn[16] = '\0';
12646 12644 if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12647 12645 != DDI_PROP_SUCCESS) {
12648 12646 failure++;
12649 12647 goto end_of_fcp_create_dip;
12650 12648 }
12651 12649
12652 12650 /*
12653 12651 * If there is no hard address - We might have to deal with
12654 12652 * that by using WWN - Having said that it is important to
12655 12653 * recognize this problem early so ssd can be informed of
12656 12654 * the right interconnect type.
12657 12655 */
12658 12656 if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12659 12657 tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12660 12658 } else {
12661 12659 tgt_id = ptgt->tgt_d_id;
12662 12660 }
12663 12661
12664 12662 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12665 12663 tgt_id) != DDI_PROP_SUCCESS) {
12666 12664 failure++;
12667 12665 goto end_of_fcp_create_dip;
12668 12666 }
12669 12667
12670 12668 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12671 12669 (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12672 12670 failure++;
12673 12671 goto end_of_fcp_create_dip;
12674 12672 }
12675 12673 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12676 12674 if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12677 12675 sam_lun) != DDI_PROP_SUCCESS) {
12678 12676 failure++;
12679 12677 goto end_of_fcp_create_dip;
12680 12678 }
12681 12679
12682 12680 end_of_fcp_create_dip:
12683 12681 scsi_hba_nodename_compatible_free(nname, compatible);
12684 12682
12685 12683 if (cdip != NULL && failure) {
12686 12684 (void) ndi_prop_remove_all(cdip);
12687 12685 (void) ndi_devi_free(cdip);
12688 12686 cdip = NULL;
12689 12687 }
12690 12688
12691 12689 return (cdip);
12692 12690 }
12693 12691
12694 12692 /*
12695 12693 * Function: fcp_create_pip
12696 12694 *
12697 12695 * Description: Creates a Path Id for the LUN specified by the caller.
12698 12696 *
12699 12697 * Argument: plun Lun structure
12700 12698 * link_cnt Link state count.
12701 12699 * tgt_cnt Target state count.
12702 12700 *
12703 12701 * Return Value: NULL if it failed
12704 12702 * mdi_pathinfo_t structure address if it succeeded
12705 12703 *
12706 12704 * Context: Kernel context
12707 12705 */
12708 12706 static mdi_pathinfo_t *
12709 12707 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12710 12708 {
12711 12709 int i;
12712 12710 char buf[MAXNAMELEN];
12713 12711 char uaddr[MAXNAMELEN];
12714 12712 int failure = 0;
12715 12713 uint32_t tgt_id;
12716 12714 uint64_t sam_lun;
12717 12715 struct fcp_tgt *ptgt = plun->lun_tgt;
12718 12716 struct fcp_port *pptr = ptgt->tgt_port;
12719 12717 dev_info_t *pdip = pptr->port_dip;
12720 12718 mdi_pathinfo_t *pip = NULL;
12721 12719 mdi_pathinfo_t *old_pip = PIP(plun->lun_cip);
12722 12720 char *nname = NULL;
12723 12721 char **compatible = NULL;
12724 12722 int ncompatible;
12725 12723 char *scsi_binding_set;
12726 12724 char t_pwwn[17];
12727 12725
12728 12726 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12729 12727 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12730 12728
12731 12729 scsi_binding_set = "vhci";
12732 12730
12733 12731 /* determine the node name and compatible */
12734 12732 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12735 12733 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12736 12734
12737 12735 if (nname == NULL) {
12738 12736 #ifdef DEBUG
12739 12737 cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
12740 12738 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12741 12739 " compatible: %s",
12742 12740 ddi_driver_name(pdip), ddi_get_instance(pdip),
12743 12741 ptgt->tgt_port_wwn.raw_wwn[0],
12744 12742 ptgt->tgt_port_wwn.raw_wwn[1],
12745 12743 ptgt->tgt_port_wwn.raw_wwn[2],
12746 12744 ptgt->tgt_port_wwn.raw_wwn[3],
12747 12745 ptgt->tgt_port_wwn.raw_wwn[4],
12748 12746 ptgt->tgt_port_wwn.raw_wwn[5],
12749 12747 ptgt->tgt_port_wwn.raw_wwn[6],
12750 12748 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12751 12749 *compatible);
12752 12750 #endif /* DEBUG */
12753 12751 failure++;
12754 12752 goto end_of_fcp_create_pip;
12755 12753 }
12756 12754
12757 12755 pip = fcp_find_existing_pip(plun, pdip);
12758 12756
12759 12757 /*
12760 12758 * if the old_dip does not match the cdip, that means there is
12761 12759 * some property change. since we'll be using the cdip, we need
12762 12760 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12763 12761 * then the dtype for the device has been updated. Offline the
12764 12762 * the old device and create a new device with the new device type
12765 12763 * Refer to bug: 4764752
12766 12764 */
12767 12765 if (old_pip && (pip != old_pip ||
12768 12766 plun->lun_state & FCP_LUN_CHANGED)) {
12769 12767 plun->lun_state &= ~(FCP_LUN_INIT);
12770 12768 mutex_exit(&plun->lun_mutex);
12771 12769 mutex_exit(&pptr->port_mutex);
12772 12770
12773 12771 mutex_enter(&ptgt->tgt_mutex);
12774 12772 (void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12775 12773 FCP_OFFLINE, lcount, tcount,
12776 12774 NDI_DEVI_REMOVE, 0);
12777 12775 mutex_exit(&ptgt->tgt_mutex);
12778 12776
12779 12777 if (pip != NULL) {
12780 12778 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12781 12779 fcp_trace, FCP_BUF_LEVEL_2, 0,
12782 12780 "Old pip=%p; New pip=%p don't match",
12783 12781 old_pip, pip);
12784 12782 } else {
12785 12783 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12786 12784 fcp_trace, FCP_BUF_LEVEL_2, 0,
12787 12785 "Old pip=%p; New pip=NULL don't match",
12788 12786 old_pip);
12789 12787 }
12790 12788
12791 12789 mutex_enter(&pptr->port_mutex);
12792 12790 mutex_enter(&plun->lun_mutex);
12793 12791 }
12794 12792
12795 12793 /*
12796 12794 * Since FC_WWN_SIZE is 8 bytes and its not like the
12797 12795 * lun_guid_size which is dependent on the target, I don't
12798 12796 * believe the same trancation happens here UNLESS the standards
12799 12797 * change the FC_WWN_SIZE value to something larger than
12800 12798 * MAXNAMELEN(currently 255 bytes).
12801 12799 */
12802 12800
12803 12801 for (i = 0; i < FC_WWN_SIZE; i++) {
12804 12802 (void) sprintf(&buf[i << 1], "%02x",
12805 12803 ptgt->tgt_port_wwn.raw_wwn[i]);
12806 12804 }
12807 12805
12808 12806 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12809 12807 buf, plun->lun_num);
12810 12808
12811 12809 if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12812 12810 /*
12813 12811 * Release the locks before calling into
12814 12812 * mdi_pi_alloc_compatible() since this can result in a
12815 12813 * callback into fcp which can result in a deadlock
12816 12814 * (see bug # 4870272).
12817 12815 *
12818 12816 * Basically, what we are trying to avoid is the scenario where
12819 12817 * one thread does ndi_devi_enter() and tries to grab
12820 12818 * fcp_mutex and another does it the other way round.
12821 12819 *
12822 12820 * But before we do that, make sure that nobody releases the
12823 12821 * port in the meantime. We can do this by setting a flag.
12824 12822 */
12825 12823 plun->lun_state &= ~(FCP_LUN_CHANGED);
12826 12824 pptr->port_state |= FCP_STATE_IN_MDI;
12827 12825 mutex_exit(&plun->lun_mutex);
12828 12826 mutex_exit(&pptr->port_mutex);
12829 12827 if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12830 12828 uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12831 12829 fcp_log(CE_WARN, pptr->port_dip,
12832 12830 "!path alloc failed:0x%x", plun);
12833 12831 mutex_enter(&pptr->port_mutex);
12834 12832 mutex_enter(&plun->lun_mutex);
12835 12833 pptr->port_state &= ~FCP_STATE_IN_MDI;
12836 12834 failure++;
12837 12835 goto end_of_fcp_create_pip;
12838 12836 }
12839 12837 mutex_enter(&pptr->port_mutex);
12840 12838 mutex_enter(&plun->lun_mutex);
12841 12839 pptr->port_state &= ~FCP_STATE_IN_MDI;
12842 12840 } else {
12843 12841 (void) mdi_prop_remove(pip, NULL);
12844 12842 }
12845 12843
12846 12844 mdi_pi_set_phci_private(pip, (caddr_t)plun);
12847 12845
12848 12846 if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12849 12847 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12850 12848 != DDI_PROP_SUCCESS) {
12851 12849 failure++;
12852 12850 goto end_of_fcp_create_pip;
12853 12851 }
12854 12852
12855 12853 if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12856 12854 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12857 12855 != DDI_PROP_SUCCESS) {
12858 12856 failure++;
12859 12857 goto end_of_fcp_create_pip;
12860 12858 }
12861 12859
12862 12860 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12863 12861 t_pwwn[16] = '\0';
12864 12862 if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12865 12863 != DDI_PROP_SUCCESS) {
12866 12864 failure++;
12867 12865 goto end_of_fcp_create_pip;
12868 12866 }
12869 12867
12870 12868 /*
12871 12869 * If there is no hard address - We might have to deal with
12872 12870 * that by using WWN - Having said that it is important to
12873 12871 * recognize this problem early so ssd can be informed of
12874 12872 * the right interconnect type.
12875 12873 */
12876 12874 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12877 12875 ptgt->tgt_hard_addr != 0) {
12878 12876 tgt_id = (uint32_t)
12879 12877 fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12880 12878 } else {
12881 12879 tgt_id = ptgt->tgt_d_id;
12882 12880 }
12883 12881
12884 12882 if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12885 12883 != DDI_PROP_SUCCESS) {
12886 12884 failure++;
12887 12885 goto end_of_fcp_create_pip;
12888 12886 }
12889 12887
12890 12888 if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12891 12889 != DDI_PROP_SUCCESS) {
12892 12890 failure++;
12893 12891 goto end_of_fcp_create_pip;
12894 12892 }
12895 12893 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12896 12894 if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12897 12895 != DDI_PROP_SUCCESS) {
12898 12896 failure++;
12899 12897 goto end_of_fcp_create_pip;
12900 12898 }
12901 12899
12902 12900 end_of_fcp_create_pip:
12903 12901 scsi_hba_nodename_compatible_free(nname, compatible);
12904 12902
12905 12903 if (pip != NULL && failure) {
12906 12904 (void) mdi_prop_remove(pip, NULL);
12907 12905 mutex_exit(&plun->lun_mutex);
12908 12906 mutex_exit(&pptr->port_mutex);
12909 12907 (void) mdi_pi_free(pip, 0);
12910 12908 mutex_enter(&pptr->port_mutex);
12911 12909 mutex_enter(&plun->lun_mutex);
12912 12910 pip = NULL;
12913 12911 }
12914 12912
12915 12913 return (pip);
12916 12914 }
12917 12915
/*
 * Scan the existing children of pdip for a devinfo node matching this
 * LUN.  A child matches only if ALL of the following agree with the
 * target/LUN at hand: node name, NODE_WWN_PROP, PORT_WWN_PROP,
 * TARGET_PROP (target id) and LUN_PROP (LUN number).
 *
 * Returns the matching child dev_info_t, or NULL if no child matched.
 *
 * The parent is held via ndi_devi_enter() for the duration of the walk
 * so the sibling list cannot change underneath us.
 */
static dev_info_t *
fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
{
	uint_t			nbytes;
	uchar_t			*bytes;
	uint_t			nwords;
	uint32_t		tgt_id;
	int			*words;
	dev_info_t		*cdip;
	dev_info_t		*ndip;
	struct fcp_tgt		*ptgt = plun->lun_tgt;
	struct fcp_port		*pptr = ptgt->tgt_port;
	int			circular;

	ndi_devi_enter(pdip, &circular);

	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
	while ((cdip = ndip) != NULL) {
		/* Fetch the next sibling first; cdip may be our match. */
		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;

		/* Node name (e.g. "ssd"/"ses") must match. */
		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
			continue;
		}

		/* Node WWN property must exist and match the target's. */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		/* Port WWN property must exist and match the target's. */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		/* Target-id property must match the id computed below. */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		/*
		 * If there is no hard address - We might have to deal with
		 * that by using WWN - Having said that it is important to
		 * recognize this problem early so ssd can be informed of
		 * the right interconnect type.
		 */
		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
		    ptgt->tgt_hard_addr != 0) {
			/* Private loop with a hard address: use the ALPA map. */
			tgt_id =
			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
		} else {
			/* Otherwise the fabric D_ID is the target id. */
			tgt_id = ptgt->tgt_d_id;
		}

		if (tgt_id != (uint32_t)*words) {
			ddi_prop_free(words);
			continue;
		}
		ddi_prop_free(words);

		/* Finally, the LUN number property must match. */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		if (plun->lun_num == (uint16_t)*words) {
			ddi_prop_free(words);
			/* Full match: cdip is the node we want. */
			break;
		}
		ddi_prop_free(words);
	}
	ndi_devi_exit(pdip, circular);

	return (cdip);
}
13042 13040
13043 13041
13044 13042 static int
13045 13043 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
13046 13044 {
13047 13045 dev_info_t *pdip;
13048 13046 char buf[MAXNAMELEN];
13049 13047 char uaddr[MAXNAMELEN];
13050 13048 int rval = FC_FAILURE;
13051 13049
13052 13050 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13053 13051
13054 13052 pdip = plun->lun_tgt->tgt_port->port_dip;
13055 13053
13056 13054 /*
13057 13055 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
13058 13056 * non-NULL even when the LUN is not there as in the case when a LUN is
13059 13057 * configured and then deleted on the device end (for T3/T4 case). In
13060 13058 * such cases, pip will be NULL.
13061 13059 *
13062 13060 * If the device generates an RSCN, it will end up getting offlined when
13063 13061 * it disappeared and a new LUN will get created when it is rediscovered
13064 13062 * on the device. If we check for lun_cip here, the LUN will not end
13065 13063 * up getting onlined since this function will end up returning a
13066 13064 * FC_SUCCESS.
13067 13065 *
13068 13066 * The behavior is different on other devices. For instance, on a HDS,
13069 13067 * there was no RSCN generated by the device but the next I/O generated
13070 13068 * a check condition and rediscovery got triggered that way. So, in
13071 13069 * such cases, this path will not be exercised
13072 13070 */
13073 13071 if (pip == NULL) {
13074 13072 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13075 13073 fcp_trace, FCP_BUF_LEVEL_4, 0,
13076 13074 "fcp_is_pip_present: plun->lun_cip is NULL: "
13077 13075 "plun: %p lun state: %x num: %d target state: %x",
13078 13076 plun, plun->lun_state, plun->lun_num,
13079 13077 plun->lun_tgt->tgt_port->port_state);
13080 13078 return (rval);
13081 13079 }
13082 13080
13083 13081 fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13084 13082
13085 13083 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13086 13084
13087 13085 if (plun->lun_old_guid) {
13088 13086 if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
13089 13087 rval = FC_SUCCESS;
13090 13088 }
13091 13089 } else {
13092 13090 if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
13093 13091 rval = FC_SUCCESS;
13094 13092 }
13095 13093 }
13096 13094 return (rval);
13097 13095 }
13098 13096
13099 13097 static mdi_pathinfo_t *
13100 13098 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13101 13099 {
13102 13100 char buf[MAXNAMELEN];
13103 13101 char uaddr[MAXNAMELEN];
13104 13102 mdi_pathinfo_t *pip;
13105 13103 struct fcp_tgt *ptgt = plun->lun_tgt;
13106 13104 struct fcp_port *pptr = ptgt->tgt_port;
13107 13105
13108 13106 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13109 13107
13110 13108 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13111 13109 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13112 13110
13113 13111 pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13114 13112
13115 13113 return (pip);
13116 13114 }
13117 13115
13118 13116
/*
 * Bring the child node (devinfo or MPxIO pathinfo) for this LUN online.
 *
 * Called with both port_mutex and lun_mutex held; both are dropped around
 * the framework online calls and re-acquired before returning.  Callers
 * must therefore be prepared for port/target/LUN state to have changed
 * across this call.
 *
 * If MPxIO reports MDI_NOT_SUPPORTED for the device, the path is torn
 * down and the LUN is re-enumerated in legacy (non-MPxIO) mode via the
 * "again" loop.
 *
 * Returns NDI_SUCCESS or NDI_FAILURE (MDI results are mapped to these).
 */
static int
fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int rval;
	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	dev_info_t	*cdip = NULL;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));
	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_online_child: plun->lun_cip is NULL: "
		    "plun: %p state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}
again:
	if (plun->lun_mpxio == 0) {
		/* Legacy (non-MPxIO) enumeration: cip is a devinfo node. */
		cdip = DIP(cip);
		/* Drop locks; ndi_devi_online() can block and re-enter fcp. */
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * We could check for FCP_LUN_INIT here but chances
		 * of getting here when it's already in FCP_LUN_INIT
		 * is rare and a duplicate ndi_devi_online wouldn't
		 * hurt either (as the node would already have been
		 * in CF2)
		 */
		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
			/* Parent not attached yet: only bind the driver. */
			rval = ndi_devi_bind_driver(cdip, flags);
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "!Invoking ndi_devi_bind_driver: rval=%d", rval);
		} else {
			rval = ndi_devi_online(cdip, flags);
		}

		/*
		 * We log the message into trace buffer if the device
		 * is "ses" and into syslog for any other device
		 * type. This is to prevent the ndi_devi_online failure
		 * message that appears for V880/A5K ses devices.
		 */
		if (rval == NDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);
		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
			fcp_log(CE_NOTE, pptr->port_dip,
			    "!ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    " !ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		}
	} else {
		/* MPxIO enumeration: cip is an mdi_pathinfo node. */
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * Hold path and exit phci to avoid deadlock with power
		 * management code during mdi_pi_online.
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_online(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		if (rval == MDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);

			/*
			 * Clear MPxIO path permanent disable in case
			 * fcp hotplug dropped the offline event.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

		} else if (rval == MDI_NOT_SUPPORTED) {
			child_info_t *old_cip = cip;

			/*
			 * MPxIO does not support this device yet.
			 * Enumerate in legacy mode.
			 */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&plun->lun_mutex);
			plun->lun_mpxio = 0;
			plun->lun_cip = NULL;
			cdip = fcp_create_dip(plun, lcount, tcount);
			plun->lun_cip = cip = CIP(cdip);
			if (cip == NULL) {
				fcp_log(CE_WARN, pptr->port_dip,
				    "!fcp_online_child: "
				    "Create devinfo failed for LU=%p", plun);
				mutex_exit(&plun->lun_mutex);

				/* Could not build a legacy node: offline it. */
				mutex_enter(&ptgt->tgt_mutex);
				plun->lun_state |= FCP_LUN_OFFLINE;
				mutex_exit(&ptgt->tgt_mutex);

				mutex_exit(&pptr->port_mutex);

				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_3, 0,
				    "fcp_online_child: creating devinfo "
				    "node 0x%p for plun 0x%p",
				    cip, plun);
				mutex_exit(&plun->lun_mutex);
				mutex_exit(&pptr->port_mutex);
				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_mutex);
				/* Retry the online via the legacy branch. */
				goto again;
			}
		} else {
			if (cdip) {
				fcp_log(CE_NOTE, pptr->port_dip,
				    "!fcp_online_child: mdi_pi_online:"
				    " failed for %s: target=%x lun=%x %x",
				    ddi_get_name(cdip), ptgt->tgt_d_id,
				    plun->lun_num, rval);
			}
		}
		/* Map the MDI result onto the NDI result space. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	if (rval == NDI_SUCCESS) {
		if (cdip) {
			/* Fire the FCAL insert event for the new child. */
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
			    &fcp_insert_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
			    cdip, fcp_insert_eid, NULL);
		}
	}
	/* Re-acquire the locks the caller expects to still hold. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);
	return (rval);
}
13294 13292
/*
 * Take the child node (devinfo or MPxIO pathinfo) for this LUN offline.
 *
 * Called with both port_mutex and lun_mutex held; both are dropped around
 * the framework offline calls and re-acquired before returning.  When
 * NDI_DEVI_REMOVE is set and the offline succeeds, the LUN's reference
 * to the child (lun_cip) is cleared before the node is freed so other
 * threads never see a stale pointer, and the pathinfo node is freed in
 * the MPxIO case.
 *
 * Returns NDI_SUCCESS or NDI_FAILURE (MDI results are mapped to these).
 */
/* ARGSUSED */
static int
fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int		rval;
	int		lun_mpxio;
	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	dev_info_t	*cdip;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_offline_child: plun->lun_cip is NULL: "
		    "plun: %p lun state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}

	/*
	 * We will use this value twice. Make a copy to be sure we use
	 * the same value in both places.
	 */
	lun_mpxio = plun->lun_mpxio;

	if (lun_mpxio == 0) {
		/* Legacy node: plain NDI offline with locks dropped. */
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		rval = ndi_devi_offline(DIP(cip), flags);
		if (rval != NDI_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: ndi_devi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	} else {
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		/*
		 * Exit phci to avoid deadlock with power management code
		 * during mdi_pi_offline
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_offline(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		/* Map the MDI result onto the NDI result space. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_state &= ~FCP_LUN_INIT;
	mutex_exit(&ptgt->tgt_mutex);

	if (rval == NDI_SUCCESS) {
		/* Success: suppress the failure trace message below. */
		cdip = NULL;
		if (flags & NDI_DEVI_REMOVE) {
			mutex_enter(&plun->lun_mutex);
			/*
			 * If the guid of the LUN changes, lun_cip will not
			 * equal to cip, and after offlining the LUN with the
			 * old guid, we should keep lun_cip since it's the cip
			 * of the LUN with the new guid.
			 * Otherwise remove our reference to child node.
			 *
			 * This must be done before the child node is freed,
			 * otherwise other threads could see a stale lun_cip
			 * pointer.
			 */
			if (plun->lun_cip == cip) {
				plun->lun_cip = NULL;
			}
			if (plun->lun_old_guid) {
				kmem_free(plun->lun_old_guid,
				    plun->lun_old_guid_size);
				plun->lun_old_guid = NULL;
				plun->lun_old_guid_size = 0;
			}
			mutex_exit(&plun->lun_mutex);
		}
	}

	if (lun_mpxio != 0) {
		if (rval == NDI_SUCCESS) {
			/*
			 * Clear MPxIO path permanent disable as the path is
			 * already offlined.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

			if (flags & NDI_DEVI_REMOVE) {
				(void) mdi_pi_free(PIP(cip), 0);
			}
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: mdi_pi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	}

	/* Re-acquire the locks the caller expects to still hold. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);

	if (cdip) {
		/* cdip is only non-NULL here when the offline failed. */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
		    " target=%x lun=%x", "ndi_offline",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
	}

	return (rval);
}
13419 13417
/*
 * Remove and free the child node (devinfo or MPxIO pathinfo) attached to
 * this LUN, if one is actually present; otherwise just clear lun_cip.
 *
 * Called with lun_mutex held (and, on the MPxIO path, evidently with
 * tgt_mutex and port_mutex held as well, since all three are dropped
 * and re-acquired around the MDI calls -- NOTE(review): confirm callers
 * hold all three).
 */
static void
fcp_remove_child(struct fcp_lun *plun)
{
	child_info_t *cip;
	int circ;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
		if (plun->lun_mpxio == 0) {
			/* Legacy devinfo node: strip props and free it. */
			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
			(void) ndi_devi_free(DIP(plun->lun_cip));
			plun->lun_cip = NULL;
		} else {
			/*
			 * Clear reference to the child node in the lun.
			 * This must be done before freeing it with mdi_pi_free
			 * and with lun_mutex held so that other threads always
			 * see either valid lun_cip or NULL when holding
			 * lun_mutex. We keep a copy in cip.
			 */
			cip = plun->lun_cip;
			plun->lun_cip = NULL;

			/* Drop all three locks before calling into MDI. */
			mutex_exit(&plun->lun_mutex);
			mutex_exit(&plun->lun_tgt->tgt_mutex);
			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);

			mdi_devi_enter(
			    plun->lun_tgt->tgt_port->port_dip, &circ);

			/*
			 * Exit phci to avoid deadlock with power management
			 * code during mdi_pi_offline
			 */
			mdi_hold_path(PIP(cip));
			mdi_devi_exit_phci(
			    plun->lun_tgt->tgt_port->port_dip, circ);
			(void) mdi_pi_offline(PIP(cip),
			    NDI_DEVI_REMOVE);
			mdi_devi_enter_phci(
			    plun->lun_tgt->tgt_port->port_dip, &circ);
			mdi_rele_path(PIP(cip));

			mdi_devi_exit(
			    plun->lun_tgt->tgt_port->port_dip, circ);

			FCP_TRACE(fcp_logq,
			    plun->lun_tgt->tgt_port->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "lun=%p pip freed %p", plun, cip);

			(void) mdi_prop_remove(PIP(cip), NULL);
			(void) mdi_pi_free(PIP(cip), 0);

			/* Re-acquire locks in port -> tgt -> lun order. */
			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
			mutex_enter(&plun->lun_tgt->tgt_mutex);
			mutex_enter(&plun->lun_mutex);
		}
	} else {
		/* No child actually present: just drop the reference. */
		plun->lun_cip = NULL;
	}
}
13483 13481
/*
 * called when a timeout occurs
 *
 * can be scheduled during an attach or resume (if not already running)
 *
 * one timeout is set up for all ports
 *
 * acquires and releases the global mutex
 *
 * For each port this watchdog:
 *  - processes pending target/LUN offline and reset-delay lists;
 *  - scans the overflow packet queue (port_pkt_head) for commands whose
 *    deadline has passed, failing or re-transporting them;
 *  - scans the internal packet list (port_ipkt_list) for packets whose
 *    restart time has arrived and retries or fails them.
 * Finally it re-arms itself via timeout(9F) while fcp_watchdog_init > 0.
 */
/*ARGSUSED*/
static void
fcp_watch(void *arg)
{
	struct fcp_port	*pptr;
	struct fcp_ipkt	*icmd;
	struct fcp_ipkt	*nicmd;
	struct fcp_pkt	*cmd;
	struct fcp_pkt	*ncmd;
	struct fcp_pkt	*tail;
	struct fcp_pkt	*pcmd;
	struct fcp_pkt	*save_head;
	struct fcp_port	*save_port;

	/* increment global watchdog time */
	fcp_watchdog_time += fcp_watchdog_timeout;

	mutex_enter(&fcp_global_mutex);

	/* scan each port in our list */
	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
		/* Remember the list head to detect list changes later. */
		save_port = fcp_port_head;
		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
		mutex_exit(&fcp_global_mutex);

		mutex_enter(&pptr->port_mutex);
		/* Skip ports that are going away and have no internal pkts. */
		if (pptr->port_ipkt_list == NULL &&
		    (pptr->port_state & (FCP_STATE_SUSPENDED |
		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
			mutex_exit(&pptr->port_mutex);
			mutex_enter(&fcp_global_mutex);
			goto end_of_watchdog;
		}

		/*
		 * We check if a list of targets need to be offlined.
		 */
		if (pptr->port_offline_tgts) {
			fcp_scan_offline_tgts(pptr);
		}

		/*
		 * We check if a list of luns need to be offlined.
		 */
		if (pptr->port_offline_luns) {
			fcp_scan_offline_luns(pptr);
		}

		/*
		 * We check if a list of targets or luns need to be reset.
		 */
		if (pptr->port_reset_list) {
			fcp_check_reset_delay(pptr);
		}

		mutex_exit(&pptr->port_mutex);

		/*
		 * This is where the pending commands (pkt) are checked for
		 * timeout.
		 */
		mutex_enter(&pptr->port_pkt_mutex);
		tail = pptr->port_pkt_tail;

		for (pcmd = NULL, cmd = pptr->port_pkt_head;
		    cmd != NULL; cmd = ncmd) {
			ncmd = cmd->cmd_next;
			/*
			 * If a command is in this queue the bit CFLAG_IN_QUEUE
			 * must be set.
			 */
			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
			/*
			 * FCP_INVALID_TIMEOUT will be set for those
			 * command that need to be failed. Mostly those
			 * cmds that could not be queued down for the
			 * "timeout" value. cmd->cmd_timeout is used
			 * to try and requeue the command regularly.
			 */
			if (cmd->cmd_timeout >= fcp_watchdog_time) {
				/*
				 * This command hasn't timed out yet. Let's
				 * go to the next one.
				 */
				pcmd = cmd;
				goto end_of_loop;
			}

			/* Unlink cmd from the overflow queue. */
			if (cmd == pptr->port_pkt_head) {
				ASSERT(pcmd == NULL);
				pptr->port_pkt_head = cmd->cmd_next;
			} else {
				ASSERT(pcmd != NULL);
				pcmd->cmd_next = cmd->cmd_next;
			}

			if (cmd == pptr->port_pkt_tail) {
				ASSERT(cmd->cmd_next == NULL);
				pptr->port_pkt_tail = pcmd;
				if (pcmd) {
					pcmd->cmd_next = NULL;
				}
			}
			cmd->cmd_next = NULL;

			/*
			 * save the current head before dropping the
			 * mutex - If the head doesn't remain the
			 * same after re acquiring the mutex, just
			 * bail out and revisit on next tick.
			 *
			 * PS: The tail pointer can change as the commands
			 * get requeued after failure to retransport
			 */
			save_head = pptr->port_pkt_head;
			mutex_exit(&pptr->port_pkt_mutex);

			if (cmd->cmd_fp_pkt->pkt_timeout ==
			    FCP_INVALID_TIMEOUT) {
				struct scsi_pkt *pkt = cmd->cmd_pkt;
				struct fcp_lun *plun;
				struct fcp_tgt *ptgt;

				plun = ADDR2LUN(&pkt->pkt_address);
				ptgt = plun->lun_tgt;

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "SCSI cmd 0x%x to D_ID=%x timed out",
				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);

				/*
				 * Aborting commands are failed with
				 * CMD_RESET; others with CMD_TIMEOUT.
				 */
				cmd->cmd_state == FCP_PKT_ABORTING ?
				    fcp_fail_cmd(cmd, CMD_RESET,
				    STAT_DEV_RESET) : fcp_fail_cmd(cmd,
				    CMD_TIMEOUT, STAT_ABORTED);
			} else {
				fcp_retransport_cmd(pptr, cmd);
			}
			mutex_enter(&pptr->port_pkt_mutex);
			if (save_head && save_head != pptr->port_pkt_head) {
				/*
				 * Looks like linked list got changed (mostly
				 * happens when an an OFFLINE LUN code starts
				 * returning overflow queue commands in
				 * parallel. So bail out and revisit during
				 * next tick
				 */
				break;
			}
		end_of_loop:
			/*
			 * Scan only upto the previously known tail pointer
			 * to avoid excessive processing - lots of new packets
			 * could have been added to the tail or the old ones
			 * re-queued.
			 */
			if (cmd == tail) {
				break;
			}
		}
		mutex_exit(&pptr->port_pkt_mutex);

		/* Now scan the internal packet (ipkt) list for retries. */
		mutex_enter(&pptr->port_mutex);
		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
			struct fcp_tgt *ptgt = icmd->ipkt_tgt;

			nicmd = icmd->ipkt_next;
			if ((icmd->ipkt_restart != 0) &&
			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
				/* packet has not timed out */
				continue;
			}

			/* time for packet re-transport */
			if (icmd == pptr->port_ipkt_list) {
				pptr->port_ipkt_list = icmd->ipkt_next;
				if (pptr->port_ipkt_list) {
					pptr->port_ipkt_list->ipkt_prev =
					    NULL;
				}
			} else {
				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
				if (icmd->ipkt_next) {
					icmd->ipkt_next->ipkt_prev =
					    icmd->ipkt_prev;
				}
			}
			icmd->ipkt_next = NULL;
			icmd->ipkt_prev = NULL;
			mutex_exit(&pptr->port_mutex);

			if (fcp_is_retryable(icmd)) {
				fc_ulp_rscn_info_t *rscnp =
				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
				    pkt_ulp_rscn_infop;

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "%x to D_ID=%x Retrying..",
				    icmd->ipkt_opcode,
				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);

				/*
				 * Update the RSCN count in the packet
				 * before resending.
				 */

				if (rscnp != NULL) {
					rscnp->ulp_rscn_count =
					    fc_ulp_get_rscn_count(pptr->
					    port_fp_handle);
				}

				mutex_enter(&pptr->port_mutex);
				mutex_enter(&ptgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);
					/*
					 * On a successful resend the packet
					 * stays in flight: re-enter the port
					 * mutex and move to the next ipkt
					 * without freeing this one.
					 */
					switch (icmd->ipkt_opcode) {
					int rval;
					case LA_ELS_PLOGI:
						if ((rval = fc_ulp_login(
						    pptr->port_fp_handle,
						    &icmd->ipkt_fpkt, 1)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PLOGI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;

					case LA_ELS_PRLI:
						if ((rval = fc_ulp_issue_els(
						    pptr->port_fp_handle,
						    icmd->ipkt_fpkt)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PRLI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;

					default:
						if ((rval = fcp_transport(
						    pptr->port_fp_handle,
						    icmd->ipkt_fpkt, 1)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PRLI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;
					}
				} else {
					/* Port/target state changed: give up. */
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);
				}
			} else {
				fcp_print_error(icmd->ipkt_fpkt);
			}

			/* Retry exhausted or failed: release the ipkt. */
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
			fcp_icmd_free(pptr, icmd);
			mutex_enter(&pptr->port_mutex);
		}

		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
		mutex_exit(&pptr->port_mutex);
		mutex_enter(&fcp_global_mutex);

	end_of_watchdog:
		/*
		 * Bail out early before getting into trouble
		 */
		if (save_port != fcp_port_head) {
			break;
		}
	}

	if (fcp_watchdog_init > 0) {
		/* reschedule timeout to go again */
		fcp_watchdog_id =
		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);
}
13803 13801
13804 13802
/*
 * Function: fcp_check_reset_delay
 *
 * Description: Walks the port's list of pending reset-delay elements.
 *		Each element processed here is unlinked and freed; if the
 *		target's change counter still matches the snapshot taken
 *		when the reset was issued, the FCP_LUN_BUSY state set by
 *		the reset is cleared on the target (RESET_TARGET) or the
 *		single LUN (RESET_LUN) and any commands still queued for
 *		it are flushed via fcp_abort_all().
 *
 * Argument:	*pptr	FCP port.
 *
 * Context:	Kernel context (watchdog path).
 *		pptr->port_mutex must be held on entry; it is dropped and
 *		re-acquired around fcp_abort_all(), which is why the list
 *		head is re-derived below instead of trusting 'prev'.
 */
static void
fcp_check_reset_delay(struct fcp_port *pptr)
{
	uint32_t tgt_cnt;		/* tgt_change_cnt snapshot from elem */
	int level;			/* RESET_TARGET or RESET_LUN */
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	struct fcp_reset_elem *cur = NULL;
	struct fcp_reset_elem *next = NULL;
	struct fcp_reset_elem *prev = NULL;

	ASSERT(mutex_owned(&pptr->port_mutex));

	next = pptr->port_reset_list;
	while ((cur = next) != NULL) {
		next = cur->next;

		/*
		 * NOTE(review): elements with timeout < fcp_watchdog_time
		 * are kept and the others are processed; confirm this
		 * polarity against the code that sets cur->timeout when
		 * the reset element is queued.
		 */
		if (cur->timeout < fcp_watchdog_time) {
			prev = cur;
			continue;
		}

		ptgt = cur->tgt;
		plun = cur->lun;
		tgt_cnt = cur->tgt_cnt;

		if (ptgt) {
			level = RESET_TARGET;
		} else {
			/* no target recorded: this was a LUN-level reset */
			ASSERT(plun != NULL);
			level = RESET_LUN;
			ptgt = plun->lun_tgt;
		}
		if (prev) {
			prev->next = next;
		} else {
			/*
			 * Because we drop port mutex while doing aborts for
			 * packets, we can't rely on reset_list pointing to
			 * our head
			 */
			if (cur == pptr->port_reset_list) {
				pptr->port_reset_list = next;
			} else {
				struct fcp_reset_elem *which;

				/* re-find the element preceding 'cur' */
				which = pptr->port_reset_list;
				while (which && which->next != cur) {
					which = which->next;
				}
				ASSERT(which != NULL);

				which->next = next;
				prev = which;
			}
		}

		kmem_free(cur, sizeof (*cur));

		/* only act if the target generation is unchanged */
		if (tgt_cnt == ptgt->tgt_change_cnt) {
			mutex_enter(&ptgt->tgt_mutex);
			if (level == RESET_TARGET) {
				fcp_update_tgt_state(ptgt,
				    FCP_RESET, FCP_LUN_BUSY);
			} else {
				fcp_update_lun_state(plun,
				    FCP_RESET, FCP_LUN_BUSY);
			}
			mutex_exit(&ptgt->tgt_mutex);

			/* fcp_abort_all() must run without port_mutex */
			mutex_exit(&pptr->port_mutex);
			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
			mutex_enter(&pptr->port_mutex);
		}
	}
}
13881 13879
13882 13880
/*
 * Function: fcp_abort_all
 *
 * Description: Completes with CMD_RESET every command destined to the given
 *		target (or single LUN) that sits on the port's overflow
 *		queue, then — unless the FCA returns all queued commands on
 *		reset (FC_RESET_RETURN_ALL) — walks the target's LUNs and
 *		tries to abort every packet in FCP_PKT_ISSUED state.  A
 *		failed abort is parked on the overflow queue so the watch
 *		thread can time it out later.
 *
 * Argument:	*pptr	FCP port.
 *		*ttgt	Target to flush (may be NULL for a LUN-level reset).
 *		*rlun	LUN to flush (used when ttgt is NULL).
 *		tgt_cnt	Target change-counter snapshot; bail out if it moves.
 *
 * Context:	Kernel context.  Called without port_mutex held.
 */
static void
fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
    struct fcp_lun *rlun, int tgt_cnt)
{
	int rval;
	struct fcp_lun *tlun, *nlun;
	struct fcp_pkt *pcmd = NULL, *ncmd = NULL,
	    *cmd = NULL, *head = NULL,
	    *tail = NULL;

	/*
	 * Pass 1: pull every matching command off the port overflow queue
	 * onto a private head/tail list while holding port_pkt_mutex.
	 */
	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
		struct fcp_tgt *ptgt = plun->lun_tgt;

		ncmd = cmd->cmd_next;

		if (ptgt != ttgt && plun != rlun) {
			/* not ours: remember it as the trailing element */
			pcmd = cmd;
			continue;
		}

		/* unlink 'cmd' from the overflow queue */
		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = ncmd;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = ncmd;
		}
		if (pptr->port_pkt_tail == cmd) {
			ASSERT(cmd->cmd_next == NULL);
			pptr->port_pkt_tail = pcmd;
			if (pcmd != NULL) {
				pcmd->cmd_next = NULL;
			}
		}

		/* append to the private list */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);
			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	/*
	 * Pass 2: complete the collected commands with CMD_RESET, but only
	 * while the target generation still matches the snapshot.
	 */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		mutex_enter(&pptr->port_mutex);
		if (ttgt->tgt_change_cnt == tgt_cnt) {
			mutex_exit(&pptr->port_mutex);
			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
			pkt->pkt_reason = CMD_RESET;
			pkt->pkt_statistics |= STAT_DEV_RESET;
			cmd->cmd_state = FCP_PKT_IDLE;
			fcp_post_callback(cmd);
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}

	/*
	 * If the FCA will return all the commands in its queue then our
	 * work is easy, just return.
	 */

	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
		return;
	}

	/*
	 * For RESET_LUN get hold of target pointer
	 */
	if (ttgt == NULL) {
		ASSERT(rlun != NULL);

		ttgt = rlun->lun_tgt;

		ASSERT(ttgt != NULL);
	}

	/*
	 * There are some severe race conditions here.
	 * While we are trying to abort the pkt, it might be completing
	 * so mark it aborted and if the abort does not succeed then
	 * handle it in the watch thread.
	 */
	mutex_enter(&ttgt->tgt_mutex);
	nlun = ttgt->tgt_lun;
	mutex_exit(&ttgt->tgt_mutex);
	while ((tlun = nlun) != NULL) {
		int restart = 0;
		if (rlun && rlun != tlun) {
			/* LUN-level reset: skip all other LUNs */
			mutex_enter(&ttgt->tgt_mutex);
			nlun = tlun->lun_next;
			mutex_exit(&ttgt->tgt_mutex);
			continue;
		}
		mutex_enter(&tlun->lun_mutex);
		cmd = tlun->lun_pkt_head;
		while (cmd != NULL) {
			if (cmd->cmd_state == FCP_PKT_ISSUED) {
				struct scsi_pkt *pkt;

				restart = 1;
				cmd->cmd_state = FCP_PKT_ABORTING;
				/* lun_mutex dropped across fc_ulp_abort() */
				mutex_exit(&tlun->lun_mutex);
				rval = fc_ulp_abort(pptr->port_fp_handle,
				    cmd->cmd_fp_pkt, KM_SLEEP);
				if (rval == FC_SUCCESS) {
					pkt = cmd->cmd_pkt;
					pkt->pkt_reason = CMD_RESET;
					pkt->pkt_statistics |= STAT_DEV_RESET;
					cmd->cmd_state = FCP_PKT_IDLE;
					fcp_post_callback(cmd);
				} else {
					caddr_t msg;

					(void) fc_ulp_error(rval, &msg);

					/*
					 * This part is tricky. The abort
					 * failed and now the command could
					 * be completing. The cmd_state ==
					 * FCP_PKT_ABORTING should save
					 * us in fcp_cmd_callback. If we
					 * are already aborting ignore the
					 * command in fcp_cmd_callback.
					 * Here we leave this packet for 20
					 * sec to be aborted in the
					 * fcp_watch thread.
					 */
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Abort failed after reset %s",
					    msg);

					cmd->cmd_timeout =
					    fcp_watchdog_time +
					    cmd->cmd_pkt->pkt_time +
					    FCP_FAILED_DELAY;

					cmd->cmd_fp_pkt->pkt_timeout =
					    FCP_INVALID_TIMEOUT;
					/*
					 * This is a hack, cmd is put in the
					 * overflow queue so that it can be
					 * timed out finally
					 */
					cmd->cmd_flags |= CFLAG_IN_QUEUE;

					mutex_enter(&pptr->port_pkt_mutex);
					if (pptr->port_pkt_head) {
						ASSERT(pptr->port_pkt_tail
						    != NULL);
						pptr->port_pkt_tail->cmd_next
						    = cmd;
						pptr->port_pkt_tail = cmd;
					} else {
						ASSERT(pptr->port_pkt_tail
						    == NULL);
						pptr->port_pkt_head =
						    pptr->port_pkt_tail
						    = cmd;
					}
					cmd->cmd_next = NULL;
					mutex_exit(&pptr->port_pkt_mutex);
				}
				/*
				 * lun_mutex was dropped: restart the scan
				 * from the head of the LUN's packet list.
				 */
				mutex_enter(&tlun->lun_mutex);
				cmd = tlun->lun_pkt_head;
			} else {
				cmd = cmd->cmd_forw;
			}
		}
		mutex_exit(&tlun->lun_mutex);

		/* if we dropped locks, rescan from the first LUN */
		mutex_enter(&ttgt->tgt_mutex);
		restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
		mutex_exit(&ttgt->tgt_mutex);

		/* stop if the target changed generation under us */
		mutex_enter(&pptr->port_mutex);
		if (tgt_cnt != ttgt->tgt_change_cnt) {
			mutex_exit(&pptr->port_mutex);
			return;
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}
}
14077 14075
14078 14076
14079 14077 /*
14080 14078 * unlink the soft state, returning the soft state found (if any)
14081 14079 *
14082 14080 * acquires and releases the global mutex
14083 14081 */
14084 14082 struct fcp_port *
14085 14083 fcp_soft_state_unlink(struct fcp_port *pptr)
14086 14084 {
14087 14085 struct fcp_port *hptr; /* ptr index */
14088 14086 struct fcp_port *tptr; /* prev hptr */
14089 14087
14090 14088 mutex_enter(&fcp_global_mutex);
14091 14089 for (hptr = fcp_port_head, tptr = NULL;
14092 14090 hptr != NULL;
14093 14091 tptr = hptr, hptr = hptr->port_next) {
14094 14092 if (hptr == pptr) {
14095 14093 /* we found a match -- remove this item */
14096 14094 if (tptr == NULL) {
14097 14095 /* we're at the head of the list */
14098 14096 fcp_port_head = hptr->port_next;
14099 14097 } else {
14100 14098 tptr->port_next = hptr->port_next;
14101 14099 }
14102 14100 break; /* success */
14103 14101 }
14104 14102 }
14105 14103 if (fcp_port_head == NULL) {
14106 14104 fcp_cleanup_blacklist(&fcp_lun_blacklist);
14107 14105 }
14108 14106 mutex_exit(&fcp_global_mutex);
14109 14107 return (hptr);
14110 14108 }
14111 14109
14112 14110
14113 14111 /*
14114 14112 * called by fcp_scsi_hba_tgt_init to find a LUN given a
14115 14113 * WWN and a LUN number
14116 14114 */
14117 14115 /* ARGSUSED */
14118 14116 static struct fcp_lun *
14119 14117 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14120 14118 {
14121 14119 int hash;
14122 14120 struct fcp_tgt *ptgt;
14123 14121 struct fcp_lun *plun;
14124 14122
14125 14123 ASSERT(mutex_owned(&pptr->port_mutex));
14126 14124
14127 14125 hash = FCP_HASH(wwn);
14128 14126 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14129 14127 ptgt = ptgt->tgt_next) {
14130 14128 if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14131 14129 sizeof (ptgt->tgt_port_wwn)) == 0) {
14132 14130 mutex_enter(&ptgt->tgt_mutex);
14133 14131 for (plun = ptgt->tgt_lun;
14134 14132 plun != NULL;
14135 14133 plun = plun->lun_next) {
14136 14134 if (plun->lun_num == lun) {
14137 14135 mutex_exit(&ptgt->tgt_mutex);
14138 14136 return (plun);
14139 14137 }
14140 14138 }
14141 14139 mutex_exit(&ptgt->tgt_mutex);
14142 14140 return (NULL);
14143 14141 }
14144 14142 }
14145 14143 return (NULL);
14146 14144 }
14147 14145
14148 14146 /*
14149 14147 * Function: fcp_prepare_pkt
14150 14148 *
14151 14149 * Description: This function prepares the SCSI cmd pkt, passed by the caller,
14152 14150 * for fcp_start(). It binds the data or partially maps it.
14153 14151 * Builds the FCP header and starts the initialization of the
14154 14152 * Fibre Channel header.
14155 14153 *
14156 14154 * Argument: *pptr FCP port.
14157 14155 * *cmd FCP packet.
14158 14156 * *plun LUN the command will be sent to.
14159 14157 *
14160 14158 * Context: User, Kernel and Interrupt context.
14161 14159 */
static void
fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
    struct fcp_lun *plun)
{
	fc_packet_t *fpkt = cmd->cmd_fp_pkt;
	struct fcp_tgt *ptgt = plun->lun_tgt;
	struct fcp_cmd *fcmd = &cmd->cmd_fcp_cmd;

	/* a packet must either have a completion routine or be polled */
	ASSERT(cmd->cmd_pkt->pkt_comp ||
	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));

	if (cmd->cmd_pkt->pkt_numcookies) {
		/* data transfer: direction comes from the DMA bind flags */
		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
			fcmd->fcp_cntl.cntl_read_data = 1;
			fcmd->fcp_cntl.cntl_write_data = 0;
			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		} else {
			fcmd->fcp_cntl.cntl_read_data = 0;
			fcmd->fcp_cntl.cntl_write_data = 1;
			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
		}

		/* hand the caller's DMA cookie list straight to the FCA */
		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;

		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
		ASSERT(fpkt->pkt_data_cookie_cnt <=
		    pptr->port_data_dma_attr.dma_attr_sgllen);

		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;

		/* FCA needs pkt_datalen to be set */
		fpkt->pkt_datalen = cmd->cmd_dmacount;
		fcmd->fcp_data_len = cmd->cmd_dmacount;
	} else {
		/* no data phase: plain exchange with zero-length payload */
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_datalen = 0;
		fcmd->fcp_data_len = 0;
	}

	/* set up the Tagged Queuing type */
	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	fcmd->fcp_ent_addr = plun->lun_addr;

	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
		/* DVMA in use: copy the FCP command into the DMA buffer */
		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
	} else {
		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
	}

	/* reset SCSI packet completion fields for (re)submission */
	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
	cmd->cmd_pkt->pkt_state = 0;
	cmd->cmd_pkt->pkt_statistics = 0;
	cmd->cmd_pkt->pkt_resid = 0;

	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;

	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
		/* polled I/O: no interrupt, no completion callback */
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
		fpkt->pkt_comp = NULL;
	} else {
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
		fpkt->pkt_comp = fcp_cmd_callback;
	}

	/* flag packets issued while the port is suspended (crash dump) */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_SUSPENDED) {
		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
	}
	mutex_exit(&pptr->port_mutex);

	/* address the frame: destination is the target, source the port */
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;

	/*
	 * Save a few kernel cycles here
	 */
#ifndef	__lock_lint
	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
#endif /* __lock_lint */
}
14257 14255
/*
 * Complete a command back to the SCSI target layer: hands cmd's scsi_pkt
 * to scsi_hba_pkt_comp(), which invokes the packet's completion routine.
 */
static void
fcp_post_callback(struct fcp_pkt *cmd)
{
	scsi_hba_pkt_comp(cmd->cmd_pkt);
}
14263 14261
14264 14262
/*
 * called to do polled I/O by fcp_start()
 *
 * return a transport status value, i.e. TRAN_ACCEPT for success
 */
14270 14268 static int
14271 14269 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14272 14270 {
14273 14271 int rval;
14274 14272
14275 14273 #ifdef DEBUG
14276 14274 mutex_enter(&pptr->port_pkt_mutex);
14277 14275 pptr->port_npkts++;
14278 14276 mutex_exit(&pptr->port_pkt_mutex);
14279 14277 #endif /* DEBUG */
14280 14278
14281 14279 if (cmd->cmd_fp_pkt->pkt_timeout) {
14282 14280 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14283 14281 } else {
14284 14282 cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14285 14283 }
14286 14284
14287 14285 ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14288 14286
14289 14287 cmd->cmd_state = FCP_PKT_ISSUED;
14290 14288
14291 14289 rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14292 14290
14293 14291 #ifdef DEBUG
14294 14292 mutex_enter(&pptr->port_pkt_mutex);
14295 14293 pptr->port_npkts--;
14296 14294 mutex_exit(&pptr->port_pkt_mutex);
14297 14295 #endif /* DEBUG */
14298 14296
14299 14297 cmd->cmd_state = FCP_PKT_IDLE;
14300 14298
14301 14299 switch (rval) {
14302 14300 case FC_SUCCESS:
14303 14301 if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14304 14302 fcp_complete_pkt(cmd->cmd_fp_pkt);
14305 14303 rval = TRAN_ACCEPT;
14306 14304 } else {
14307 14305 rval = TRAN_FATAL_ERROR;
14308 14306 }
14309 14307 break;
14310 14308
14311 14309 case FC_TRAN_BUSY:
14312 14310 rval = TRAN_BUSY;
14313 14311 cmd->cmd_pkt->pkt_resid = 0;
14314 14312 break;
14315 14313
14316 14314 case FC_BADPACKET:
14317 14315 rval = TRAN_BADPKT;
14318 14316 break;
14319 14317
14320 14318 default:
14321 14319 rval = TRAN_FATAL_ERROR;
14322 14320 break;
14323 14321 }
14324 14322
14325 14323 return (rval);
14326 14324 }
14327 14325
14328 14326
14329 14327 /*
14330 14328 * called by some of the following transport-called routines to convert
14331 14329 * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14332 14330 */
14333 14331 static struct fcp_port *
14334 14332 fcp_dip2port(dev_info_t *dip)
14335 14333 {
14336 14334 int instance;
14337 14335
14338 14336 instance = ddi_get_instance(dip);
14339 14337 return (ddi_get_soft_state(fcp_softstate, instance));
14340 14338 }
14341 14339
14342 14340
/*
 * called internally to return the LUN whose lun_cip matches the given
 * child info pointer
 */
14346 14344 struct fcp_lun *
14347 14345 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14348 14346 {
14349 14347 struct fcp_tgt *ptgt;
14350 14348 struct fcp_lun *plun;
14351 14349 int i;
14352 14350
14353 14351
14354 14352 ASSERT(mutex_owned(&pptr->port_mutex));
14355 14353
14356 14354 for (i = 0; i < FCP_NUM_HASH; i++) {
14357 14355 for (ptgt = pptr->port_tgt_hash_table[i];
14358 14356 ptgt != NULL;
14359 14357 ptgt = ptgt->tgt_next) {
14360 14358 mutex_enter(&ptgt->tgt_mutex);
14361 14359 for (plun = ptgt->tgt_lun; plun != NULL;
14362 14360 plun = plun->lun_next) {
14363 14361 mutex_enter(&plun->lun_mutex);
14364 14362 if (plun->lun_cip == cip) {
14365 14363 mutex_exit(&plun->lun_mutex);
14366 14364 mutex_exit(&ptgt->tgt_mutex);
14367 14365 return (plun); /* match found */
14368 14366 }
14369 14367 mutex_exit(&plun->lun_mutex);
14370 14368 }
14371 14369 mutex_exit(&ptgt->tgt_mutex);
14372 14370 }
14373 14371 }
14374 14372 return (NULL); /* no LUN found */
14375 14373 }
14376 14374
14377 14375 /*
14378 14376 * pass an element to the hotplug list, kick the hotplug thread
14379 14377 * and wait for the element to get processed by the hotplug thread.
14380 14378 * on return the element is freed.
14381 14379 *
14382 14380 * return zero success and non-zero on failure
14383 14381 *
14384 14382 * acquires/releases the target mutex
14385 14383 *
14386 14384 */
14387 14385 static int
14388 14386 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14389 14387 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14390 14388 {
14391 14389 struct fcp_hp_elem *elem;
14392 14390 int rval;
14393 14391
14394 14392 mutex_enter(&plun->lun_tgt->tgt_mutex);
14395 14393 if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14396 14394 what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14397 14395 mutex_exit(&plun->lun_tgt->tgt_mutex);
14398 14396 fcp_log(CE_CONT, pptr->port_dip,
14399 14397 "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14400 14398 what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14401 14399 return (NDI_FAILURE);
14402 14400 }
14403 14401 mutex_exit(&plun->lun_tgt->tgt_mutex);
14404 14402 mutex_enter(&elem->mutex);
14405 14403 if (elem->wait) {
14406 14404 while (elem->wait) {
14407 14405 cv_wait(&elem->cv, &elem->mutex);
14408 14406 }
14409 14407 }
14410 14408 rval = (elem->result);
14411 14409 mutex_exit(&elem->mutex);
14412 14410 mutex_destroy(&elem->mutex);
14413 14411 cv_destroy(&elem->cv);
14414 14412 kmem_free(elem, sizeof (struct fcp_hp_elem));
14415 14413 return (rval);
14416 14414 }
14417 14415
14418 14416 /*
14419 14417 * pass an element to the hotplug list, and then
14420 14418 * kick the hotplug thread
14421 14419 *
14422 14420 * return Boolean success, i.e. non-zero if all goes well, else zero on error
14423 14421 *
14424 14422 * acquires/releases the hotplug mutex
14425 14423 *
14426 14424 * called with the target mutex owned
14427 14425 *
14428 14426 * memory acquired in NOSLEEP mode
14429 14427 * NOTE: if wait is set to 1 then the caller is responsible for waiting on
14430 14428 * for the hp daemon to process the request and is responsible for
14431 14429 * freeing the element
14432 14430 */
14433 14431 static struct fcp_hp_elem *
14434 14432 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14435 14433 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14436 14434 {
14437 14435 struct fcp_hp_elem *elem;
14438 14436 dev_info_t *pdip;
14439 14437
14440 14438 ASSERT(pptr != NULL);
14441 14439 ASSERT(plun != NULL);
14442 14440 ASSERT(plun->lun_tgt != NULL);
14443 14441 ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14444 14442
14445 14443 /* create space for a hotplug element */
14446 14444 if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14447 14445 == NULL) {
14448 14446 fcp_log(CE_WARN, NULL,
14449 14447 "!can't allocate memory for hotplug element");
14450 14448 return (NULL);
14451 14449 }
14452 14450
14453 14451 /* fill in hotplug element */
14454 14452 elem->port = pptr;
14455 14453 elem->lun = plun;
14456 14454 elem->cip = cip;
14457 14455 elem->old_lun_mpxio = plun->lun_mpxio;
14458 14456 elem->what = what;
14459 14457 elem->flags = flags;
14460 14458 elem->link_cnt = link_cnt;
14461 14459 elem->tgt_cnt = tgt_cnt;
14462 14460 elem->wait = wait;
14463 14461 mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14464 14462 cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14465 14463
14466 14464 /* schedule the hotplug task */
14467 14465 pdip = pptr->port_dip;
14468 14466 mutex_enter(&plun->lun_mutex);
14469 14467 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14470 14468 plun->lun_event_count++;
14471 14469 elem->event_cnt = plun->lun_event_count;
14472 14470 }
14473 14471 mutex_exit(&plun->lun_mutex);
14474 14472 if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14475 14473 (void *)elem, KM_NOSLEEP) == NULL) {
14476 14474 mutex_enter(&plun->lun_mutex);
14477 14475 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14478 14476 plun->lun_event_count--;
14479 14477 }
14480 14478 mutex_exit(&plun->lun_mutex);
14481 14479 kmem_free(elem, sizeof (*elem));
14482 14480 return (0);
14483 14481 }
14484 14482
14485 14483 return (elem);
14486 14484 }
14487 14485
14488 14486
/*
 * Function: fcp_retransport_cmd
 *
 * Description: Tries to send again a command taken off the port's overflow
 *		queue.  If the LUN is neither busy nor offline and the port
 *		is not onlining, the packet is re-prepared and handed to the
 *		transport.  On transport failure, or when the LUN/port state
 *		forbids the retry, the packet goes back on the overflow
 *		queue via fcp_queue_pkt().
 *
 * Argument:	*pptr	FCP port.
 *		*cmd	FCP packet to retransport (CFLAG_IN_QUEUE set).
 *
 * Context:	Kernel context.
 */
static void
fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int rval;
	struct scsi_address *ap;
	struct fcp_lun *plun;
	struct fcp_tgt *ptgt;
	fc_packet_t *fpkt;

	ap = &cmd->cmd_pkt->pkt_address;
	plun = ADDR2LUN(ap);
	ptgt = plun->lun_tgt;

	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);

	cmd->cmd_state = FCP_PKT_IDLE;

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
		fc_ulp_rscn_info_t *rscnp;

		cmd->cmd_state = FCP_PKT_ISSUED;

		/*
		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
		 * originally NULL, hence we try to set it to the pd pointed
		 * to by the SCSI device we're trying to get to.
		 */

		fpkt = cmd->cmd_fp_pkt;
		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
			fpkt->pkt_pd = ptgt->tgt_pd_handle;
			/*
			 * We need to notify the transport that we now have a
			 * reference to the remote port handle.
			 */
			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
		}

		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* polled commands never sit on the overflow queue */
		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);

		/* prepare the packet */

		fcp_prepare_pkt(pptr, cmd, plun);

		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
		    pkt_ulp_rscn_infop;

		/* zero pkt_time means no watchdog deadline for this cmd */
		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;

		/* stamp the current RSCN count so stale sends are caught */
		if (rscnp != NULL) {
			rscnp->ulp_rscn_count =
			    fc_ulp_get_rscn_count(pptr->
			    port_fp_handle);
		}

		rval = fcp_transport(pptr->port_fp_handle,
		    cmd->cmd_fp_pkt, 0);

		if (rval == FC_SUCCESS) {
			return;
		}
		/*
		 * NOTE(review): FCP_PKT_ISSUED looks like a state value
		 * rather than a bit flag, so clearing it with &= is
		 * unusual — kept as-is; confirm against the FCP_PKT_*
		 * definitions.
		 */
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
	} else {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
	}

	/* could not (re)send now: park it on the overflow queue */
	fcp_queue_pkt(pptr, cmd);
}
14565 14563
14566 14564
14567 14565 static void
14568 14566 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14569 14567 {
14570 14568 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14571 14569
14572 14570 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14573 14571 cmd->cmd_state = FCP_PKT_IDLE;
14574 14572
14575 14573 cmd->cmd_pkt->pkt_reason = reason;
14576 14574 cmd->cmd_pkt->pkt_state = 0;
14577 14575 cmd->cmd_pkt->pkt_statistics = statistics;
14578 14576
14579 14577 fcp_post_callback(cmd);
14580 14578 }
14581 14579
14582 14580 /*
14583 14581 * Function: fcp_queue_pkt
14584 14582 *
14585 14583 * Description: This function queues the packet passed by the caller into
14586 14584 * the list of packets of the FCP port.
14587 14585 *
14588 14586 * Argument: *pptr FCP port.
14589 14587 * *cmd FCP packet to queue.
14590 14588 *
14591 14589 * Return Value: None
14592 14590 *
14593 14591 * Context: User, Kernel and Interrupt context.
14594 14592 */
14595 14593 static void
14596 14594 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14597 14595 {
14598 14596 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL);
14599 14597
14600 14598 mutex_enter(&pptr->port_pkt_mutex);
14601 14599 cmd->cmd_flags |= CFLAG_IN_QUEUE;
14602 14600 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14603 14601 cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14604 14602
14605 14603 /*
14606 14604 * zero pkt_time means hang around for ever
14607 14605 */
14608 14606 if (cmd->cmd_pkt->pkt_time) {
14609 14607 if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14610 14608 cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14611 14609 } else {
14612 14610 /*
14613 14611 * Indicate the watch thread to fail the
14614 14612 * command by setting it to highest value
14615 14613 */
14616 14614 cmd->cmd_timeout = fcp_watchdog_time;
14617 14615 cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14618 14616 }
14619 14617 }
14620 14618
14621 14619 if (pptr->port_pkt_head) {
14622 14620 ASSERT(pptr->port_pkt_tail != NULL);
14623 14621
14624 14622 pptr->port_pkt_tail->cmd_next = cmd;
14625 14623 pptr->port_pkt_tail = cmd;
14626 14624 } else {
14627 14625 ASSERT(pptr->port_pkt_tail == NULL);
14628 14626
14629 14627 pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14630 14628 }
14631 14629 cmd->cmd_next = NULL;
14632 14630 mutex_exit(&pptr->port_pkt_mutex);
14633 14631 }
14634 14632
14635 14633 /*
14636 14634 * Function: fcp_update_targets
14637 14635 *
14638 14636 * Description: This function applies the specified change of state to all
14639 14637 * the targets listed. The operation applied is 'set'.
14640 14638 *
14641 14639 * Argument: *pptr FCP port.
14642 14640 * *dev_list Array of fc_portmap_t structures.
14643 14641 * count Length of dev_list.
14644 14642 * state State bits to update.
14645 14643 * cause Reason for the update.
14646 14644 *
14647 14645 * Return Value: None
14648 14646 *
14649 14647 * Context: User, Kernel and Interrupt context.
14650 14648 * The mutex pptr->port_mutex must be held.
14651 14649 */
14652 14650 static void
14653 14651 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14654 14652 uint32_t count, uint32_t state, int cause)
14655 14653 {
14656 14654 fc_portmap_t *map_entry;
14657 14655 struct fcp_tgt *ptgt;
14658 14656
14659 14657 ASSERT(MUTEX_HELD(&pptr->port_mutex));
14660 14658
14661 14659 while (count--) {
14662 14660 map_entry = &(dev_list[count]);
14663 14661 ptgt = fcp_lookup_target(pptr,
14664 14662 (uchar_t *)&(map_entry->map_pwwn));
14665 14663 if (ptgt == NULL) {
14666 14664 continue;
14667 14665 }
14668 14666
14669 14667 mutex_enter(&ptgt->tgt_mutex);
14670 14668 ptgt->tgt_trace = 0;
14671 14669 ptgt->tgt_change_cnt++;
14672 14670 ptgt->tgt_statec_cause = cause;
14673 14671 ptgt->tgt_tmp_cnt = 1;
14674 14672 fcp_update_tgt_state(ptgt, FCP_SET, state);
14675 14673 mutex_exit(&ptgt->tgt_mutex);
14676 14674 }
14677 14675 }
14678 14676
14679 14677 static int
14680 14678 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14681 14679 int lcount, int tcount, int cause)
14682 14680 {
14683 14681 int rval;
14684 14682
14685 14683 mutex_enter(&pptr->port_mutex);
14686 14684 rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14687 14685 mutex_exit(&pptr->port_mutex);
14688 14686
14689 14687 return (rval);
14690 14688 }
14691 14689
14692 14690
/*
 * Function: fcp_call_finish_init_held
 *
 * Description: Bookkeeping for the end of a discovery step.  Decrements the
 *		per-target temporary counter and, when it drains (and the
 *		change-counter snapshots are still current), finishes the
 *		target via fcp_finish_tgt(); likewise drains the per-port
 *		counter and finishes port initialization via
 *		fcp_finish_init() for link-change/link-down causes.
 *
 * Argument:	*pptr	FCP port.
 *		*ptgt	Target concerned, or NULL.
 *		lcount	Link change-counter snapshot.
 *		tcount	Target change-counter snapshot.
 *		cause	Reason for the update (FCP_CAUSE_*).
 *
 * Return Value:	FCP_NO_CHANGE, or FCP_DEV_CHANGE when the target's
 *			change counter no longer matches tcount.
 *
 * Context:	Called with pptr->port_mutex held.
 */
static int
fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int lcount, int tcount, int cause)
{
	int finish_init = 0;
	int finish_tgt = 0;
	int do_finish_init = 0;
	int rval = FCP_NO_CHANGE;

	/* only link-level causes may complete port initialization */
	if (cause == FCP_CAUSE_LINK_CHANGE ||
	    cause == FCP_CAUSE_LINK_DOWN) {
		do_finish_init = 1;
	}

	if (ptgt != NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
		    " cause = %d, d_id = 0x%x, tgt_done = %d",
		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
		    ptgt->tgt_d_id, ptgt->tgt_done);

		mutex_enter(&ptgt->tgt_mutex);

		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
			/* the target changed generation under us */
			rval = FCP_DEV_CHANGE;
			if (do_finish_init && ptgt->tgt_done == 0) {
				ptgt->tgt_done++;
				finish_init = 1;
			}
		} else {
			/* one less outstanding operation on this target */
			if (--ptgt->tgt_tmp_cnt <= 0) {
				ptgt->tgt_tmp_cnt = 0;
				finish_tgt = 1;

				if (do_finish_init) {
					finish_init = 1;
				}
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "Call Finish Init for NO target");

		if (do_finish_init) {
			finish_init = 1;
		}
	}

	if (finish_tgt) {
		ASSERT(ptgt != NULL);

		mutex_enter(&ptgt->tgt_mutex);
#ifdef	DEBUG
		/* record who drained tgt_tmp_cnt, for debugging */
		bzero(ptgt->tgt_tmp_cnt_stack,
		    sizeof (ptgt->tgt_tmp_cnt_stack));

		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
		    FCP_STACK_DEPTH);
#endif /* DEBUG */
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
	}

	/* drain the port counter only if the link snapshot is current */
	if (finish_init && lcount == pptr->port_link_cnt) {
		ASSERT(pptr->port_tmp_cnt > 0);
		if (--pptr->port_tmp_cnt == 0) {
			fcp_finish_init(pptr);
		}
	} else if (lcount != pptr->port_link_cnt) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_call_finish_init_held,1: state change occured"
		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
	}

	return (rval);
}
14775 14773
/*
 * Function: fcp_reconfigure_luns
 *
 * Description: timeout(9F) callback that replays a REPORTLUN-changed event
 *		for one target by building a single-entry portmap and feeding
 *		it back through fcp_statec_callback(), forcing LUN
 *		rediscovery on that target.
 *
 * Argument:	tgt_handle	Opaque pointer to the struct fcp_tgt whose
 *				LUNs must be reconfigured.
 *
 * Context:	Kernel context (timeout callback).
 */
static void
fcp_reconfigure_luns(void * tgt_handle)
{
	uint32_t dev_cnt;
	fc_portmap_t	*devlist;
	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
	struct fcp_port	 *pptr = ptgt->tgt_port;

	/*
	 * If the timer that fires this off got canceled too late, the
	 * target could have been destroyed.
	 */

	/*
	 * NOTE(review): tgt_tid is read here without tgt_mutex; the
	 * untimeout path in fcp_free_target() clears it before canceling,
	 * which this check relies on — confirm before restructuring.
	 */
	if (ptgt->tgt_tid == NULL) {
		return;
	}

	/* KM_NOSLEEP: timeout context; on failure just log and give up. */
	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
	if (devlist == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap",
		    pptr->port_instance);
		return;
	}

	/* Describe this one target as a logged-in, REPORTLUN-changed entry. */
	dev_cnt = 1;
	devlist->map_pd = ptgt->tgt_pd_handle;
	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
	devlist->map_did.port_id = ptgt->tgt_d_id;

	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);

	devlist->map_state = PORT_DEVICE_LOGGED_IN;
	devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
	devlist->map_flags = 0;

	/* Re-enter the normal state-change path with the synthetic map. */
	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);

	/*
	 * Clear the tgt_tid after no more references to
	 * the fcp_tgt
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_tid = NULL;
	mutex_exit(&ptgt->tgt_mutex);

	kmem_free(devlist, sizeof (*devlist));
}
14826 14824
14827 14825
14828 14826 static void
14829 14827 fcp_free_targets(struct fcp_port *pptr)
14830 14828 {
14831 14829 int i;
14832 14830 struct fcp_tgt *ptgt;
14833 14831
14834 14832 mutex_enter(&pptr->port_mutex);
14835 14833 for (i = 0; i < FCP_NUM_HASH; i++) {
14836 14834 ptgt = pptr->port_tgt_hash_table[i];
14837 14835 while (ptgt != NULL) {
14838 14836 struct fcp_tgt *next_tgt = ptgt->tgt_next;
14839 14837
14840 14838 fcp_free_target(ptgt);
14841 14839 ptgt = next_tgt;
14842 14840 }
14843 14841 }
14844 14842 mutex_exit(&pptr->port_mutex);
14845 14843 }
14846 14844
14847 14845
/*
 * Function: fcp_free_target
 *
 * Description: Tears down one target: cancels its pending timeout (if any),
 *		deallocates every LUN on its lun list, then deallocates the
 *		target itself.
 *
 * Argument:	*ptgt	Target to free.  Must not be referenced afterwards.
 *
 * Context:	Kernel context.  Caller holds pptr->port_mutex (see
 *		fcp_free_targets()); tgt_mutex is taken and dropped here.
 */
static void
fcp_free_target(struct fcp_tgt *ptgt)
{
	struct fcp_lun	*plun;
	timeout_id_t	tid;

	mutex_enter(&ptgt->tgt_mutex);
	tid = ptgt->tgt_tid;

	/*
	 * Cancel any pending timeouts for this target.
	 */

	if (tid != NULL) {
		/*
		 * Set tgt_tid to NULL first to avoid a race in the callback.
		 * If tgt_tid is NULL, the callback will simply return.
		 */
		ptgt->tgt_tid = NULL;
		/*
		 * Drop tgt_mutex around untimeout(9F): the callback
		 * (fcp_reconfigure_luns) takes tgt_mutex, so holding it
		 * here could deadlock while untimeout waits for the
		 * callback to finish.
		 */
		mutex_exit(&ptgt->tgt_mutex);
		(void) untimeout(tid);
		mutex_enter(&ptgt->tgt_mutex);
	}

	/* Free all LUNs, saving each lun_next before its node goes away. */
	plun = ptgt->tgt_lun;
	while (plun != NULL) {
		struct fcp_lun *next_lun = plun->lun_next;

		fcp_dealloc_lun(plun);
		plun = next_lun;
	}

	mutex_exit(&ptgt->tgt_mutex);
	fcp_dealloc_tgt(ptgt);
}
14883 14881
14884 14882 /*
14885 14883 * Function: fcp_is_retryable
14886 14884 *
14887 14885 * Description: Indicates if the internal packet is retryable.
14888 14886 *
14889 14887 * Argument: *icmd FCP internal packet.
14890 14888 *
14891 14889 * Return Value: 0 Not retryable
14892 14890 * 1 Retryable
14893 14891 *
14894 14892 * Context: User, Kernel and Interrupt context
14895 14893 */
14896 14894 static int
14897 14895 fcp_is_retryable(struct fcp_ipkt *icmd)
14898 14896 {
14899 14897 if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14900 14898 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14901 14899 return (0);
14902 14900 }
14903 14901
14904 14902 return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14905 14903 icmd->ipkt_port->port_deadline) ? 1 : 0);
14906 14904 }
14907 14905
14908 14906 /*
14909 14907 * Function: fcp_create_on_demand
14910 14908 *
14911 14909 * Argument: *pptr FCP port.
14912 14910 * *pwwn Port WWN.
14913 14911 *
14914 14912 * Return Value: 0 Success
14915 14913 * EIO
14916 14914 * ENOMEM
14917 14915 * EBUSY
14918 14916 * EINVAL
14919 14917 *
14920 14918 * Context: User and Kernel context
14921 14919 */
14922 14920 static int
14923 14921 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14924 14922 {
14925 14923 int wait_ms;
14926 14924 int tcount;
14927 14925 int lcount;
14928 14926 int ret;
14929 14927 int error;
14930 14928 int rval = EIO;
14931 14929 int ntries;
14932 14930 fc_portmap_t *devlist;
14933 14931 opaque_t pd;
14934 14932 struct fcp_lun *plun;
14935 14933 struct fcp_tgt *ptgt;
14936 14934 int old_manual = 0;
14937 14935
14938 14936 /* Allocates the fc_portmap_t structure. */
14939 14937 devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14940 14938
14941 14939 /*
14942 14940 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14943 14941 * in the commented statement below:
14944 14942 *
14945 14943 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14946 14944 *
14947 14945 * Below, the deadline for the discovery process is set.
14948 14946 */
14949 14947 mutex_enter(&pptr->port_mutex);
14950 14948 pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14951 14949 mutex_exit(&pptr->port_mutex);
14952 14950
14953 14951 /*
14954 14952 * We try to find the remote port based on the WWN provided by the
14955 14953 * caller. We actually ask fp/fctl if it has it.
14956 14954 */
14957 14955 pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14958 14956 (la_wwn_t *)pwwn, &error, 1);
14959 14957
14960 14958 if (pd == NULL) {
14961 14959 kmem_free(devlist, sizeof (*devlist));
14962 14960 return (rval);
14963 14961 }
14964 14962
14965 14963 /*
14966 14964 * The remote port was found. We ask fp/fctl to update our
14967 14965 * fc_portmap_t structure.
14968 14966 */
14969 14967 ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14970 14968 (la_wwn_t *)pwwn, devlist);
14971 14969 if (ret != FC_SUCCESS) {
14972 14970 kmem_free(devlist, sizeof (*devlist));
14973 14971 return (rval);
14974 14972 }
14975 14973
14976 14974 /*
14977 14975 * The map flag field is set to indicates that the creation is being
14978 14976 * done at the user request (Ioclt probably luxadm or cfgadm).
14979 14977 */
14980 14978 devlist->map_type = PORT_DEVICE_USER_CREATE;
14981 14979
14982 14980 mutex_enter(&pptr->port_mutex);
14983 14981
14984 14982 /*
14985 14983 * We check to see if fcp already has a target that describes the
14986 14984 * device being created. If not it is created.
14987 14985 */
14988 14986 ptgt = fcp_lookup_target(pptr, pwwn);
14989 14987 if (ptgt == NULL) {
14990 14988 lcount = pptr->port_link_cnt;
14991 14989 mutex_exit(&pptr->port_mutex);
14992 14990
14993 14991 ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14994 14992 if (ptgt == NULL) {
14995 14993 fcp_log(CE_WARN, pptr->port_dip,
14996 14994 "!FC target allocation failed");
14997 14995 return (ENOMEM);
14998 14996 }
14999 14997
15000 14998 mutex_enter(&pptr->port_mutex);
15001 14999 }
15002 15000
15003 15001 mutex_enter(&ptgt->tgt_mutex);
15004 15002 ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
15005 15003 ptgt->tgt_tmp_cnt = 1;
15006 15004 ptgt->tgt_device_created = 0;
15007 15005 /*
15008 15006 * If fabric and auto config is set but the target was
15009 15007 * manually unconfigured then reset to the manual_config_only to
15010 15008 * 0 so the device will get configured.
15011 15009 */
15012 15010 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15013 15011 fcp_enable_auto_configuration &&
15014 15012 ptgt->tgt_manual_config_only == 1) {
15015 15013 old_manual = 1;
15016 15014 ptgt->tgt_manual_config_only = 0;
15017 15015 }
15018 15016 mutex_exit(&ptgt->tgt_mutex);
15019 15017
15020 15018 fcp_update_targets(pptr, devlist, 1,
15021 15019 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
15022 15020
15023 15021 lcount = pptr->port_link_cnt;
15024 15022 tcount = ptgt->tgt_change_cnt;
15025 15023
15026 15024 if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
15027 15025 tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
15028 15026 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15029 15027 fcp_enable_auto_configuration && old_manual) {
15030 15028 mutex_enter(&ptgt->tgt_mutex);
15031 15029 ptgt->tgt_manual_config_only = 1;
15032 15030 mutex_exit(&ptgt->tgt_mutex);
15033 15031 }
15034 15032
15035 15033 if (pptr->port_link_cnt != lcount ||
15036 15034 ptgt->tgt_change_cnt != tcount) {
15037 15035 rval = EBUSY;
15038 15036 }
15039 15037 mutex_exit(&pptr->port_mutex);
15040 15038
15041 15039 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15042 15040 FCP_BUF_LEVEL_3, 0,
15043 15041 "fcp_create_on_demand: mapflags ptgt=%x, "
15044 15042 "lcount=%x::port_link_cnt=%x, "
15045 15043 "tcount=%x: tgt_change_cnt=%x, rval=%x",
15046 15044 ptgt, lcount, pptr->port_link_cnt,
15047 15045 tcount, ptgt->tgt_change_cnt, rval);
15048 15046 return (rval);
15049 15047 }
15050 15048
15051 15049 /*
15052 15050 * Due to lack of synchronization mechanisms, we perform
15053 15051 * periodic monitoring of our request; Because requests
15054 15052 * get dropped when another one supercedes (either because
15055 15053 * of a link change or a target change), it is difficult to
15056 15054 * provide a clean synchronization mechanism (such as a
15057 15055 * semaphore or a conditional variable) without exhaustively
15058 15056 * rewriting the mainline discovery code of this driver.
15059 15057 */
15060 15058 wait_ms = 500;
15061 15059
15062 15060 ntries = fcp_max_target_retries;
15063 15061
15064 15062 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15065 15063 FCP_BUF_LEVEL_3, 0,
15066 15064 "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
15067 15065 "lcount=%x::port_link_cnt=%x, "
15068 15066 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15069 15067 "tgt_tmp_cnt =%x",
15070 15068 ntries, ptgt, lcount, pptr->port_link_cnt,
15071 15069 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15072 15070 ptgt->tgt_tmp_cnt);
15073 15071
15074 15072 mutex_enter(&ptgt->tgt_mutex);
15075 15073 while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
15076 15074 ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
15077 15075 mutex_exit(&ptgt->tgt_mutex);
15078 15076 mutex_exit(&pptr->port_mutex);
15079 15077
15080 15078 delay(drv_usectohz(wait_ms * 1000));
15081 15079
15082 15080 mutex_enter(&pptr->port_mutex);
15083 15081 mutex_enter(&ptgt->tgt_mutex);
15084 15082 }
15085 15083
15086 15084
15087 15085 if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
15088 15086 rval = EBUSY;
15089 15087 } else {
15090 15088 if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
15091 15089 FCP_TGT_NODE_PRESENT) {
15092 15090 rval = 0;
15093 15091 }
15094 15092 }
15095 15093
15096 15094 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15097 15095 FCP_BUF_LEVEL_3, 0,
15098 15096 "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
15099 15097 "lcount=%x::port_link_cnt=%x, "
15100 15098 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15101 15099 "tgt_tmp_cnt =%x",
15102 15100 ntries, ptgt, lcount, pptr->port_link_cnt,
15103 15101 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15104 15102 ptgt->tgt_tmp_cnt);
15105 15103
15106 15104 if (rval) {
15107 15105 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15108 15106 fcp_enable_auto_configuration && old_manual) {
15109 15107 ptgt->tgt_manual_config_only = 1;
15110 15108 }
15111 15109 mutex_exit(&ptgt->tgt_mutex);
15112 15110 mutex_exit(&pptr->port_mutex);
15113 15111 kmem_free(devlist, sizeof (*devlist));
15114 15112
15115 15113 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15116 15114 FCP_BUF_LEVEL_3, 0,
15117 15115 "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
15118 15116 "lcount=%x::port_link_cnt=%x, "
15119 15117 "tcount=%x::tgt_change_cnt=%x, rval=%x, "
15120 15118 "tgt_device_created=%x, tgt D_ID=%x",
15121 15119 ntries, ptgt, lcount, pptr->port_link_cnt,
15122 15120 tcount, ptgt->tgt_change_cnt, rval,
15123 15121 ptgt->tgt_device_created, ptgt->tgt_d_id);
15124 15122 return (rval);
15125 15123 }
15126 15124
15127 15125 if ((plun = ptgt->tgt_lun) != NULL) {
15128 15126 tcount = plun->lun_tgt->tgt_change_cnt;
15129 15127 } else {
15130 15128 rval = EINVAL;
15131 15129 }
15132 15130 lcount = pptr->port_link_cnt;
15133 15131
15134 15132 /*
15135 15133 * Configuring the target with no LUNs will fail. We
15136 15134 * should reset the node state so that it is not
15137 15135 * automatically configured when the LUNs are added
15138 15136 * to this target.
15139 15137 */
15140 15138 if (ptgt->tgt_lun_cnt == 0) {
15141 15139 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
15142 15140 }
15143 15141 mutex_exit(&ptgt->tgt_mutex);
15144 15142 mutex_exit(&pptr->port_mutex);
15145 15143
15146 15144 while (plun) {
15147 15145 child_info_t *cip;
15148 15146
15149 15147 mutex_enter(&plun->lun_mutex);
15150 15148 cip = plun->lun_cip;
15151 15149 mutex_exit(&plun->lun_mutex);
15152 15150
15153 15151 mutex_enter(&ptgt->tgt_mutex);
15154 15152 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
15155 15153 mutex_exit(&ptgt->tgt_mutex);
15156 15154
15157 15155 rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
15158 15156 FCP_ONLINE, lcount, tcount,
15159 15157 NDI_ONLINE_ATTACH);
15160 15158 if (rval != NDI_SUCCESS) {
15161 15159 FCP_TRACE(fcp_logq,
15162 15160 pptr->port_instbuf, fcp_trace,
15163 15161 FCP_BUF_LEVEL_3, 0,
15164 15162 "fcp_create_on_demand: "
15165 15163 "pass_to_hp_and_wait failed "
15166 15164 "rval=%x", rval);
15167 15165 rval = EIO;
15168 15166 } else {
15169 15167 mutex_enter(&LUN_TGT->tgt_mutex);
15170 15168 plun->lun_state &= ~(FCP_LUN_OFFLINE |
15171 15169 FCP_LUN_BUSY);
15172 15170 mutex_exit(&LUN_TGT->tgt_mutex);
15173 15171 }
15174 15172 mutex_enter(&ptgt->tgt_mutex);
15175 15173 }
15176 15174
15177 15175 plun = plun->lun_next;
15178 15176 mutex_exit(&ptgt->tgt_mutex);
15179 15177 }
15180 15178
15181 15179 kmem_free(devlist, sizeof (*devlist));
15182 15180
15183 15181 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15184 15182 fcp_enable_auto_configuration && old_manual) {
15185 15183 mutex_enter(&ptgt->tgt_mutex);
15186 15184 /* if successful then set manual to 0 */
15187 15185 if (rval == 0) {
15188 15186 ptgt->tgt_manual_config_only = 0;
15189 15187 } else {
15190 15188 /* reset to 1 so the user has to do the config */
15191 15189 ptgt->tgt_manual_config_only = 1;
15192 15190 }
15193 15191 mutex_exit(&ptgt->tgt_mutex);
15194 15192 }
15195 15193
15196 15194 return (rval);
15197 15195 }
15198 15196
15199 15197
15200 15198 static void
15201 15199 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15202 15200 {
15203 15201 int count;
15204 15202 uchar_t byte;
15205 15203
15206 15204 count = 0;
15207 15205 while (*string) {
15208 15206 byte = FCP_ATOB(*string); string++;
15209 15207 byte = byte << 4 | FCP_ATOB(*string); string++;
15210 15208 bytes[count++] = byte;
15211 15209
15212 15210 if (count >= byte_len) {
15213 15211 break;
15214 15212 }
15215 15213 }
15216 15214 }
15217 15215
15218 15216 static void
15219 15217 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15220 15218 {
15221 15219 int i;
15222 15220
15223 15221 for (i = 0; i < FC_WWN_SIZE; i++) {
15224 15222 (void) sprintf(string + (i * 2),
15225 15223 "%02x", wwn[i]);
15226 15224 }
15227 15225
15228 15226 }
15229 15227
/*
 * Function: fcp_print_error
 *
 * Description: Logs a human-readable diagnostic for a failed internal
 *		packet.  Builds a format string incrementally in a scratch
 *		buffer (note the doubled %% — the buffer itself is later
 *		used as a format string by fcp_log()), then appends FCP
 *		response / SCSI sense details for SCSI commands or
 *		transport state/reason for ELS failures.
 *
 * Argument:	*fpkt	Failed fc packet; its pkt_ulp_private points to the
 *			fcp internal packet.
 *
 * Context:	Kernel context (best effort: silently returns if the
 *		scratch buffer cannot be allocated).
 */
static void
fcp_print_error(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	caddr_t		buf;
	int		scsi_cmd = 0;

	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;
	pptr = ptgt->tgt_port;

	/* KM_NOSLEEP: logging is best effort; give up quietly on failure. */
	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		return;
	}

	/* Seed the message with the opcode-specific prefix. */
	switch (icmd->ipkt_opcode) {
	case SCMD_REPORT_LUN:
		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY_PAGE83:
		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY:
		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case LA_ELS_PLOGI:
		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
		break;

	case LA_ELS_PRLI:
		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
		break;
	}

	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
		struct fcp_rsp response, *rsp;
		uchar_t asc, ascq;
		caddr_t sense_key = NULL;
		struct fcp_rsp_info fcp_rsp_err, *bep;

		/*
		 * With nodma, the response is directly addressable;
		 * otherwise it must be copied out of the DMA buffer.
		 */
		if (icmd->ipkt_nodma) {
			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
			    sizeof (struct fcp_rsp));
		} else {
			rsp = &response;
			bep = &fcp_rsp_err;

			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));

			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
			    bep, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp_info));
		}


		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			(void) sprintf(buf + strlen(buf),
			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
			    " senselen=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			kmem_free(buf, 256);
			return;
		}

		if (rsp->fcp_u.fcp_status.rsp_len_set &&
		    bep->rsp_code != FCP_NO_FAILURE) {
			(void) sprintf(buf + strlen(buf),
			    " FCP Response code = 0x%x", bep->rsp_code);
		}

		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
			struct scsi_extended_sense sense_info, *sense_ptr;

			if (icmd->ipkt_nodma) {
				sense_ptr = (struct scsi_extended_sense *)
				    ((caddr_t)fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len);
			} else {
				sense_ptr = &sense_info;

				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len, &sense_info,
				    fpkt->pkt_resp_acc,
				    sizeof (struct scsi_extended_sense));
			}

			/* Translate the sense key to its printable name. */
			if (sense_ptr->es_key < NUM_SENSE_KEYS +
			    NUM_IMPL_SENSE_KEYS) {
				sense_key = sense_keys[sense_ptr->es_key];
			} else {
				sense_key = "Undefined";
			}

			asc = sense_ptr->es_add_code;
			ascq = sense_ptr->es_qual_code;

			(void) sprintf(buf + strlen(buf),
			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
			    " Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, sense_key,
			    asc, ascq);
		} else {
			(void) sprintf(buf + strlen(buf),
			    " : SCSI status=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->fcp_u.fcp_status.scsi_status);
		}
	} else {
		/* Transport-level failure: report fp/fctl state and reason. */
		caddr_t state, reason, action, expln;

		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
		    &action, &expln);

		(void) sprintf(buf + strlen(buf), ": State:%%s,"
		    " Reason:%%s. Giving up");

		if (scsi_cmd) {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, state, reason);
		} else {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, state, reason);
		}
	}

	kmem_free(buf, 256);
}
15386 15384
15387 15385
/*
 * Function: fcp_handle_ipkt_errors
 *
 * Description: Decides what to do with an internal packet that failed with
 *		transport error `rval': requeue it for retry on busy-type
 *		errors, downgrade a PRLI to a PLOGI on FC_LOGINREQ, or log
 *		the failure.
 *
 * Argument:	*pptr	FCP port.
 *		*ptgt	Target the packet was aimed at.
 *		*icmd	The failed internal packet.
 *		rval	FC_* error code returned by the transport.
 *		op	Printable name of the operation, for the log message.
 *
 * Return Value: DDI_SUCCESS	the packet was requeued/resent.
 *		DDI_FAILURE	the packet is abandoned (error was logged or
 *				a state change made it moot).
 *
 * Context:	Kernel context.
 */
static int
fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, int rval, caddr_t op)
{
	int	ret = DDI_FAILURE;
	char	*error;

	switch (rval) {
	case FC_DEVICE_BUSY_NEW_RSCN:
		/*
		 * This means that there was a new RSCN that the transport
		 * knows about (which the ULP *may* know about too) but the
		 * pkt that was sent down was related to an older RSCN. So, we
		 * are just going to reset the retry count and deadline and
		 * continue to retry. The idea is that transport is currently
		 * working on the new RSCN and will soon let the ULPs know
		 * about it and when it does the existing logic will kick in
		 * where it will change the tcount to indicate that something
		 * changed on the target. So, rediscovery will start and there
		 * will not be an infinite retry.
		 *
		 * For a full flow of how the RSCN info is transferred back and
		 * forth, see fp.c
		 */
		icmd->ipkt_retries = 0;
		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* FALLTHROUGH */

	case FC_STATEC_BUSY:
	case FC_DEVICE_BUSY:
	case FC_PBUSY:
	case FC_FBUSY:
	case FC_TRAN_BUSY:
	case FC_OFFLINE:
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* Transient conditions: requeue while retry budget remains. */
		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
		    fcp_is_retryable(icmd)) {
			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
			ret = DDI_SUCCESS;
		}
		break;

	case FC_LOGINREQ:
		/*
		 * FC_LOGINREQ used to be handled just like all the cases
		 * above. It has been changed to handled a PRLI that fails
		 * with FC_LOGINREQ different than other ipkts that fail
		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
		 * a simple matter to turn it into a PLOGI instead, so that's
		 * exactly what we do here.
		 */
		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
			    FCP_BUF_LEVEL_3, 0,
			    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
			    rval, ptgt->tgt_d_id);
			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
			    fcp_is_retryable(icmd)) {
				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
				ret = DDI_SUCCESS;
			}
		}
		break;

	default:
		/*
		 * Hard failure: log it, unless a link/target state change
		 * already superseded this packet (then it's just noise).
		 */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			(void) fc_ulp_error(rval, &error);
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to send %s to D_ID=%x error=%s",
			    op, ptgt->tgt_d_id, error);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_handle_ipkt_errors,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		break;
	}

	return (ret);
}
15489 15487
15490 15488
15491 15489 /*
15492 15490 * Check of outstanding commands on any LUN for this target
15493 15491 */
15494 15492 static int
15495 15493 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15496 15494 {
15497 15495 struct fcp_lun *plun;
15498 15496 struct fcp_pkt *cmd;
15499 15497
15500 15498 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15501 15499 mutex_enter(&plun->lun_mutex);
15502 15500 for (cmd = plun->lun_pkt_head; cmd != NULL;
15503 15501 cmd = cmd->cmd_forw) {
15504 15502 if (cmd->cmd_state == FCP_PKT_ISSUED) {
15505 15503 mutex_exit(&plun->lun_mutex);
15506 15504 return (FC_SUCCESS);
15507 15505 }
15508 15506 }
15509 15507 mutex_exit(&plun->lun_mutex);
15510 15508 }
15511 15509
15512 15510 return (FC_FAILURE);
15513 15511 }
15514 15512
/*
 * Function: fcp_construct_map
 *
 * Description: Builds an fc_portmap_t array describing every non-orphan
 *		target on the port.  Two passes under port_mutex: first
 *		count the targets, then fill one map entry per target,
 *		asking fp/fctl for the current data and falling back to a
 *		locally-built PORT_DEVICE_INVALID/OLD entry when fp/fctl
 *		does not know the port.
 *
 * Argument:	*pptr		FCP port.
 *		*dev_cnt	Out: number of entries in the returned array.
 *
 * Return Value: Pointer to the array (caller frees
 *		sizeof (fc_portmap_t) * *dev_cnt bytes), or NULL if the
 *		KM_NOSLEEP allocation failed.
 *
 * Context:	Kernel context.
 */
static fc_portmap_t *
fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
{
	int		i;
	fc_portmap_t	*devlist;
	fc_portmap_t	*devptr = NULL;
	struct fcp_tgt	*ptgt;

	mutex_enter(&pptr->port_mutex);
	/* Pass 1: count the non-orphan targets. */
	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				++*dev_cnt;
			}
		}
	}

	/* KM_NOSLEEP: allocated while holding port_mutex. */
	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
	    KM_NOSLEEP);
	if (devlist == NULL) {
		mutex_exit(&pptr->port_mutex);
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap for construct map",
		    pptr->port_instance);
		return (devptr);
	}

	/* Pass 2: fill one entry per target, bumping devlist as we go. */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				int ret;

				ret = fc_ulp_pwwn_to_portmap(
				    pptr->port_fp_handle,
				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
				    devlist);

				if (ret == FC_SUCCESS) {
					devlist++;
					continue;
				}

				/*
				 * fp/fctl no longer knows this port: emit a
				 * locally-built entry marked invalid/old.
				 */
				devlist->map_pd = NULL;
				devlist->map_did.port_id = ptgt->tgt_d_id;
				devlist->map_hard_addr.hard_addr =
				    ptgt->tgt_hard_addr;

				devlist->map_state = PORT_DEVICE_INVALID;
				devlist->map_type = PORT_DEVICE_OLD;

				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
				    &devlist->map_nwwn, FC_WWN_SIZE);

				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
				    &devlist->map_pwwn, FC_WWN_SIZE);

				devlist++;
			}
		}
	}

	mutex_exit(&pptr->port_mutex);

	return (devptr);
}
15582 15580 /*
15583 15581 * Inimate MPxIO that the lun is busy and cannot accept regular IO
15584 15582 */
15585 15583 static void
15586 15584 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15587 15585 {
15588 15586 int i;
15589 15587 struct fcp_tgt *ptgt;
15590 15588 struct fcp_lun *plun;
15591 15589
15592 15590 for (i = 0; i < FCP_NUM_HASH; i++) {
15593 15591 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15594 15592 ptgt = ptgt->tgt_next) {
15595 15593 mutex_enter(&ptgt->tgt_mutex);
15596 15594 for (plun = ptgt->tgt_lun; plun != NULL;
15597 15595 plun = plun->lun_next) {
15598 15596 if (plun->lun_mpxio &&
15599 15597 plun->lun_state & FCP_LUN_BUSY) {
15600 15598 if (!fcp_pass_to_hp(pptr, plun,
15601 15599 plun->lun_cip,
15602 15600 FCP_MPXIO_PATH_SET_BUSY,
15603 15601 pptr->port_link_cnt,
15604 15602 ptgt->tgt_change_cnt, 0, 0)) {
15605 15603 FCP_TRACE(fcp_logq,
15606 15604 pptr->port_instbuf,
15607 15605 fcp_trace,
15608 15606 FCP_BUF_LEVEL_2, 0,
15609 15607 "path_verifybusy: "
15610 15608 "disable lun %p failed!",
15611 15609 plun);
15612 15610 }
15613 15611 }
15614 15612 }
15615 15613 mutex_exit(&ptgt->tgt_mutex);
15616 15614 }
15617 15615 }
15618 15616 }
15619 15617
15620 15618 static int
15621 15619 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15622 15620 {
15623 15621 dev_info_t *cdip = NULL;
15624 15622 dev_info_t *pdip = NULL;
15625 15623
15626 15624 ASSERT(plun);
15627 15625
15628 15626 mutex_enter(&plun->lun_mutex);
15629 15627 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15630 15628 mutex_exit(&plun->lun_mutex);
15631 15629 return (NDI_FAILURE);
15632 15630 }
15633 15631 mutex_exit(&plun->lun_mutex);
15634 15632 cdip = mdi_pi_get_client(PIP(cip));
15635 15633 pdip = mdi_pi_get_phci(PIP(cip));
15636 15634
15637 15635 ASSERT(cdip != NULL);
15638 15636 ASSERT(pdip != NULL);
15639 15637
15640 15638 if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15641 15639 /* LUN ready for IO */
15642 15640 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15643 15641 } else {
15644 15642 /* LUN busy to accept IO */
15645 15643 (void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15646 15644 }
15647 15645 return (NDI_SUCCESS);
15648 15646 }
15649 15647
/*
 * Caller must free the returned string of MAXPATHLEN len
 * If the device is offline (-1 instance number) NULL
 * will be returned.
 */
static char *
fcp_get_lun_path(struct fcp_lun *plun) {
	dev_info_t *dip = NULL;
	char *path = NULL;
	mdi_pathinfo_t *pip = NULL;

	if (plun == NULL) {
		return (NULL);
	}

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_mpxio == 0) {
		/* Non-MPxIO LUN: the child is a plain dev_info node. */
		dip = DIP(plun->lun_cip);
		mutex_exit(&plun->lun_mutex);
	} else {
		/*
		 * lun_cip must be accessed with lun_mutex held. Here
		 * plun->lun_cip either points to a valid node or it is NULL.
		 * Make a copy so that we can release lun_mutex.
		 */
		pip = PIP(plun->lun_cip);

		/*
		 * Increase ref count on the path so that we can release
		 * lun_mutex and still be sure that the pathinfo node (and thus
		 * also the client) is not deallocated. If pip is NULL, this
		 * has no effect.
		 */
		mdi_hold_path(pip);

		mutex_exit(&plun->lun_mutex);

		/* Get the client. If pip is NULL, we get NULL. */
		dip = mdi_pi_get_client(pip);
	}

	/* Offline devices (instance < 0) yield no path. */
	if (dip == NULL)
		goto out;
	if (ddi_get_instance(dip) < 0)
		goto out;

	/*
	 * NOTE(review): KM_SLEEP allocations do not fail, so the NULL
	 * check below is purely defensive.
	 */
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (path == NULL)
		goto out;

	(void) ddi_pathname(dip, path);

	/* Clean up. */
out:
	if (pip != NULL)
		mdi_rele_path(pip);

	/*
	 * In reality, the user wants a fully valid path (one they can open)
	 * but this string is lacking the mount point, and the minor node.
	 * It would be nice if we could "figure these out" somehow
	 * and fill them in. Otherwise, the userland code has to understand
	 * driver specific details of which minor node is the "best" or
	 * "right" one to expose. (Ex: which slice is the whole disk, or
	 * which tape doesn't rewind)
	 */
	return (path);
}
15718 15716
/*
 * SCSA bus-config entry point. Waits (bounded by FCP_INIT_WAIT_TIMEOUT
 * measured from port attach) for fabric devices to report in before
 * delegating to the generic NDI bus-config code.
 */
static int
fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int64_t reset_delay;
	int rval, retry = 0;
	struct fcp_port *pptr = fcp_dip2port(parent);

	/*
	 * Remaining wait budget = FCP_INIT_WAIT_TIMEOUT minus the time
	 * already elapsed since the port attached; clamp at zero.
	 */
	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
	    (ddi_get_lbolt64() - pptr->port_attach_time);
	if (reset_delay < 0) {
		reset_delay = 0;
	}

	if (fcp_bus_config_debug) {
		flag |= NDI_DEVI_DEBUG;
	}

	switch (op) {
	case BUS_CONFIG_ONE:
		/*
		 * Retry the command since we need to ensure
		 * the fabric devices are available for root
		 */
		while (retry++ < fcp_max_bus_config_retries) {
			rval = (ndi_busop_bus_config(parent,
			    flag | NDI_MDI_FALLBACK, op,
			    arg, childp, (clock_t)reset_delay));
			if (rval == 0) {
				return (rval);
			}
		}

		/*
		 * drain taskq to make sure nodes are created and then
		 * try again.
		 */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
		    op, arg, childp, 0));

	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL: {
		/*
		 * delay till all devices report in (port_tmp_cnt == 0)
		 * or FCP_INIT_WAIT_TIMEOUT
		 */
		mutex_enter(&pptr->port_mutex);
		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
			(void) cv_timedwait(&pptr->port_config_cv,
			    &pptr->port_mutex,
			    ddi_get_lbolt() + (clock_t)reset_delay);
			/* Recompute remaining budget after each wakeup. */
			reset_delay =
			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
			    (ddi_get_lbolt64() - pptr->port_attach_time);
		}
		mutex_exit(&pptr->port_mutex);
		/* drain taskq to make sure nodes are created */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag, op,
		    arg, childp, 0));
	}

	default:
		return (NDI_FAILURE);
	}
	/*NOTREACHED*/
}
15787 15785
15788 15786 static int
15789 15787 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15790 15788 ddi_bus_config_op_t op, void *arg)
15791 15789 {
15792 15790 if (fcp_bus_config_debug) {
15793 15791 flag |= NDI_DEVI_DEBUG;
15794 15792 }
15795 15793
15796 15794 return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15797 15795 }
15798 15796
15799 15797
15800 15798 /*
15801 15799 * Routine to copy GUID into the lun structure.
15802 15800 * returns 0 if copy was successful and 1 if encountered a
15803 15801 * failure and did not copy the guid.
15804 15802 */
15805 15803 static int
15806 15804 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15807 15805 {
15808 15806
15809 15807 int retval = 0;
15810 15808
15811 15809 /* add one for the null terminator */
15812 15810 const unsigned int len = strlen(guidp) + 1;
15813 15811
15814 15812 if ((guidp == NULL) || (plun == NULL)) {
15815 15813 return (1);
15816 15814 }
15817 15815
15818 15816 /*
15819 15817 * if the plun->lun_guid already has been allocated,
15820 15818 * then check the size. if the size is exact, reuse
15821 15819 * it....if not free it an allocate the required size.
15822 15820 * The reallocation should NOT typically happen
15823 15821 * unless the GUIDs reported changes between passes.
15824 15822 * We free up and alloc again even if the
15825 15823 * size was more than required. This is due to the
15826 15824 * fact that the field lun_guid_size - serves
15827 15825 * dual role of indicating the size of the wwn
15828 15826 * size and ALSO the allocation size.
15829 15827 */
15830 15828 if (plun->lun_guid) {
15831 15829 if (plun->lun_guid_size != len) {
15832 15830 /*
15833 15831 * free the allocated memory and
15834 15832 * initialize the field
15835 15833 * lun_guid_size to 0.
15836 15834 */
15837 15835 kmem_free(plun->lun_guid, plun->lun_guid_size);
15838 15836 plun->lun_guid = NULL;
15839 15837 plun->lun_guid_size = 0;
15840 15838 }
15841 15839 }
15842 15840 /*
15843 15841 * alloc only if not already done.
15844 15842 */
15845 15843 if (plun->lun_guid == NULL) {
15846 15844 plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15847 15845 if (plun->lun_guid == NULL) {
15848 15846 cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:"
15849 15847 "Unable to allocate"
15850 15848 "Memory for GUID!!! size %d", len);
15851 15849 retval = 1;
15852 15850 } else {
15853 15851 plun->lun_guid_size = len;
15854 15852 }
15855 15853 }
15856 15854 if (plun->lun_guid) {
15857 15855 /*
15858 15856 * now copy the GUID
15859 15857 */
15860 15858 bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15861 15859 }
15862 15860 return (retval);
15863 15861 }
15864 15862
/*
 * fcp_reconfig_wait
 *
 * Wait for a rediscovery/reconfiguration to complete before continuing.
 */

static void
fcp_reconfig_wait(struct fcp_port *pptr)
{
	clock_t reconfig_start, wait_timeout;

	/*
	 * Quick check. If pptr->port_tmp_cnt is 0, there is no
	 * reconfiguration in progress.
	 */

	mutex_enter(&pptr->port_mutex);
	if (pptr->port_tmp_cnt == 0) {
		mutex_exit(&pptr->port_mutex);
		return;
	}
	mutex_exit(&pptr->port_mutex);

	/*
	 * If we cause a reconfig by raising power, delay until all devices
	 * report in (port_tmp_cnt returns to 0)
	 */

	reconfig_start = ddi_get_lbolt();
	wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);

	mutex_enter(&pptr->port_mutex);

	/*
	 * cv_timedwait() can wake early, so loop until either the
	 * outstanding device count drains or the overall timeout
	 * (anchored at reconfig_start) expires.
	 */
	while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
	    pptr->port_tmp_cnt) {

		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
		    reconfig_start + wait_timeout);
	}

	mutex_exit(&pptr->port_mutex);

	/*
	 * Even if fcp_tmp_count isn't 0, continue without error. The port
	 * we want may still be ok. If not, it will error out later
	 */
}
15912 15910
/*
 * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
 * We rely on the fcp_global_mutex to provide protection against changes to
 * the fcp_lun_blacklist.
 *
 * You can describe a list of target port WWNs and LUN numbers which will
 * not be configured. LUN numbers will be interpreted as decimal. White
 * spaces and ',' can be used in the list of LUN numbers.
 *
 * To prevent LUNs 1 and 2 from being configured for target
 * port 510000f010fd92a1 and target port 510000e012079df1, set:
 *
 * pwwn-lun-blacklist=
 * "510000f010fd92a1,1,2",
 * "510000e012079df1,1,2";
 */
static void
fcp_read_blacklist(dev_info_t *dip,
    struct fcp_black_list_entry **pplun_blacklist) {
	char **prop_array = NULL;
	char *curr_pwwn = NULL;
	char *curr_lun = NULL;
	uint32_t prop_item = 0;
	int idx = 0;
	int len = 0;

	ASSERT(mutex_owned(&fcp_global_mutex));
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
		return;
	}

	/*
	 * Each property string is "<16-hex-digit pwwn><sep><lun list>".
	 * Validate the WWN portion here; the LUN list itself is parsed
	 * by fcp_mask_pwwn_lun().
	 */
	for (idx = 0; idx < prop_item; idx++) {

		/* Skip leading blanks before the port WWN. */
		curr_pwwn = prop_array[idx];
		while (*curr_pwwn == ' ') {
			curr_pwwn++;
		}
		/* Need 16 WWN characters plus a separator and LUN data. */
		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
			    ", please check.", curr_pwwn);
			continue;
		}
		/* The WWN must be followed by ' ' or ','. */
		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
			    ", please check.", curr_pwwn);
			continue;
		}
		/* All 16 WWN characters must be hex digits. */
		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
			if (isxdigit(curr_pwwn[len]) != TRUE) {
				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
				    "blacklist, please check.", curr_pwwn);
				break;
			}
		}
		if (len != sizeof (la_wwn_t) * 2) {
			continue;
		}

		/* Split in place: NUL-terminate the WWN, parse the LUNs. */
		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
		*(curr_lun - 1) = '\0';
		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
	}

	ddi_prop_free(prop_array);
}
15981 15979
/*
 * Get the masking info about one remote target port designated by wwn.
 * Lun ids could be separated by ',' or white spaces.
 */
static void
fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
    struct fcp_black_list_entry **pplun_blacklist) {
	int idx = 0;
	uint32_t offset = 0;
	unsigned long lun_id = 0;
	char lunid_buf[16];
	char *pend = NULL;
	int illegal_digit = 0;

	/*
	 * Tokenize the LUN list: [offset, offset + idx) spans the
	 * current token, which ends at ',', ' ' or the terminating NUL.
	 */
	while (offset < strlen(curr_lun)) {
		while ((curr_lun[offset + idx] != ',') &&
		    (curr_lun[offset + idx] != '\0') &&
		    (curr_lun[offset + idx] != ' ')) {
			if (isdigit(curr_lun[offset + idx]) == 0) {
				illegal_digit++;
			}
			idx++;
		}
		/* A non-decimal character invalidates the whole token. */
		if (illegal_digit > 0) {
			offset += (idx+1); /* To the start of next lun */
			idx = 0;
			illegal_digit = 0;
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check digits.",
			    curr_lun, curr_pwwn);
			continue;
		}
		/* Token must fit lunid_buf including the NUL added below. */
		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check the length of LUN#.",
			    curr_lun, curr_pwwn);
			break;
		}
		if (idx == 0) { /* ignore ' ' or ',' or '\0' */
			offset++;
			continue;
		}

		/* Copy the token out, convert, and record the mask. */
		bcopy(curr_lun + offset, lunid_buf, idx);
		lunid_buf[idx] = '\0';
		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
		} else {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check %s.",
			    curr_lun, curr_pwwn, lunid_buf);
		}
		offset += (idx+1); /* To the start of next lun */
		idx = 0;
	}
}
16038 16036
16039 16037 /*
16040 16038 * Add one masking record
16041 16039 */
16042 16040 static void
16043 16041 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
16044 16042 struct fcp_black_list_entry **pplun_blacklist) {
16045 16043 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16046 16044 struct fcp_black_list_entry *new_entry = NULL;
16047 16045 la_wwn_t wwn;
16048 16046
16049 16047 fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
16050 16048 while (tmp_entry) {
16051 16049 if ((bcmp(&tmp_entry->wwn, &wwn,
16052 16050 sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
16053 16051 return;
16054 16052 }
16055 16053
16056 16054 tmp_entry = tmp_entry->next;
16057 16055 }
16058 16056
16059 16057 /* add to black list */
16060 16058 new_entry = (struct fcp_black_list_entry *)kmem_zalloc
16061 16059 (sizeof (struct fcp_black_list_entry), KM_SLEEP);
16062 16060 bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
16063 16061 new_entry->lun = lun_id;
16064 16062 new_entry->masked = 0;
16065 16063 new_entry->next = *pplun_blacklist;
16066 16064 *pplun_blacklist = new_entry;
16067 16065 }
16068 16066
16069 16067 /*
16070 16068 * Check if we should mask the specified lun of this fcp_tgt
16071 16069 */
16072 16070 static int
16073 16071 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id) {
16074 16072 struct fcp_black_list_entry *remote_port;
16075 16073
16076 16074 remote_port = fcp_lun_blacklist;
16077 16075 while (remote_port != NULL) {
16078 16076 if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16079 16077 if (remote_port->lun == lun_id) {
16080 16078 remote_port->masked++;
16081 16079 if (remote_port->masked == 1) {
16082 16080 fcp_log(CE_NOTE, NULL, "LUN %d of port "
16083 16081 "%02x%02x%02x%02x%02x%02x%02x%02x "
16084 16082 "is masked due to black listing.\n",
16085 16083 lun_id, wwn->raw_wwn[0],
16086 16084 wwn->raw_wwn[1], wwn->raw_wwn[2],
16087 16085 wwn->raw_wwn[3], wwn->raw_wwn[4],
16088 16086 wwn->raw_wwn[5], wwn->raw_wwn[6],
16089 16087 wwn->raw_wwn[7]);
16090 16088 }
16091 16089 return (TRUE);
16092 16090 }
16093 16091 }
16094 16092 remote_port = remote_port->next;
16095 16093 }
16096 16094 return (FALSE);
16097 16095 }
16098 16096
16099 16097 /*
16100 16098 * Release all allocated resources
16101 16099 */
16102 16100 static void
16103 16101 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist) {
16104 16102 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16105 16103 struct fcp_black_list_entry *current_entry = NULL;
16106 16104
16107 16105 ASSERT(mutex_owned(&fcp_global_mutex));
16108 16106 /*
16109 16107 * Traverse all luns
16110 16108 */
16111 16109 while (tmp_entry) {
16112 16110 current_entry = tmp_entry;
16113 16111 tmp_entry = tmp_entry->next;
16114 16112 kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16115 16113 }
16116 16114 *pplun_blacklist = NULL;
16117 16115 }
16118 16116
/*
 * In fcp module,
 * pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
 *
 * tran_init_pkt(9E) for pseudo FC-HBAs: allocate (or reuse) a scsi_pkt
 * with the embedded fcp_pkt/fc_packet, wire the structures together and
 * attach the data buffer.
 */
static struct scsi_pkt *
fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	fcp_port_t *pptr = ADDR2FCP(ap);
	fcp_pkt_t *cmd = NULL;
	fc_frame_hdr_t *hp;

	/*
	 * First step: get the packet
	 */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
		    callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		/*
		 * All fields in scsi_pkt will be initialized properly or
		 * set to zero. We need do nothing for scsi_pkt.
		 */
		/*
		 * But it's our responsibility to link other related data
		 * structures. Their initialization will be done, just
		 * before the scsi_pkt will be sent to FCA.
		 */
		cmd = PKT2CMD(pkt);
		cmd->cmd_pkt = pkt;
		cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
		/*
		 * fc_packet_t: FCA-private data lives directly after the
		 * fcp_pkt in the same allocation.
		 */
		cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
		cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
		    sizeof (struct fcp_pkt));
		cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
		cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
		cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
		/*
		 * Fill in the Fabric Channel Header
		 */
		hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_COMMAND;
		hp->rsvd = 0;
		hp->type = FC_TYPE_SCSI_FCP;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;
		hp->rx_id = 0xffff;
		hp->ro = 0;
	} else {
		/*
		 * We need think if we should reset any elements in
		 * related data structures.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "reusing pkt, flags %d", flags);
		cmd = PKT2CMD(pkt);
		/* Drop any stale port-device handle from the prior use. */
		if (cmd->cmd_fp_pkt->pkt_pd) {
			cmd->cmd_fp_pkt->pkt_pd = NULL;
		}
	}

	/*
	 * Second step: dma allocation/move
	 */
	if (bp && bp->b_bcount != 0) {
		/*
		 * Mark if it's read or write
		 */
		if (bp->b_flags & B_READ) {
			cmd->cmd_flags |= CFLAG_IS_READ;
		} else {
			cmd->cmd_flags &= ~CFLAG_IS_READ;
		}

		/* Map the buffer so pkt_data is a kernel virtual address. */
		bp_mapin(bp);
		cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
		cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
		cmd->cmd_fp_pkt->pkt_data_resid = 0;
	} else {
		/*
		 * It seldom happens, except when CLUSTER or SCSI_VHCI wants
		 * to send zero-length read/write.
		 */
		cmd->cmd_fp_pkt->pkt_data = NULL;
		cmd->cmd_fp_pkt->pkt_datalen = 0;
	}

	return (pkt);
}
16221 16219
16222 16220 static void
16223 16221 fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16224 16222 {
16225 16223 fcp_port_t *pptr = ADDR2FCP(ap);
16226 16224
16227 16225 /*
16228 16226 * First we let FCA to uninitilize private part.
16229 16227 */
16230 16228 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
16231 16229 PKT2CMD(pkt)->cmd_fp_pkt);
16232 16230
16233 16231 /*
16234 16232 * Then we uninitialize fc_packet.
16235 16233 */
16236 16234
16237 16235 /*
16238 16236 * Thirdly, we uninitializae fcp_pkt.
16239 16237 */
16240 16238
16241 16239 /*
16242 16240 * In the end, we free scsi_pkt.
16243 16241 */
16244 16242 scsi_hba_pkt_free(ap, pkt);
16245 16243 }
16246 16244
/*
 * tran_start(9E) for pseudo FC-HBAs: finish initializing the FCP command
 * and fc_packet from the scsi_pkt, then hand it to the transport (polled
 * when FLAG_NOINTR is set).
 */
static int
fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	fcp_port_t *pptr = ADDR2FCP(ap);
	fcp_lun_t *plun = ADDR2LUN(ap);
	fcp_tgt_t *ptgt = plun->lun_tgt;
	fcp_pkt_t *cmd = PKT2CMD(pkt);
	fcp_cmd_t *fcmd = &cmd->cmd_fcp_cmd;
	fc_packet_t *fpkt = cmd->cmd_fp_pkt;
	int rval;

	fpkt->pkt_pd = ptgt->tgt_pd_handle;
	(void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);

	/*
	 * Firstly, we need initialize fcp_pkt_t
	 * Secondly, we need initialize fcp_cmd_t.
	 */
	bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
	fcmd->fcp_data_len = fpkt->pkt_datalen;
	fcmd->fcp_ent_addr = plun->lun_addr;
	/* Map SCSA tag flags onto the FCP task queue type. */
	if (pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
	} else {
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 1;
	}

	/*
	 * Then we need initialize fc_packet_t too.
	 *
	 * NOTE(review): the +2 pads the FC timeout beyond the SCSI
	 * packet timeout -- presumably so the target side times out
	 * first; confirm against the transport's timeout handling.
	 */
	fpkt->pkt_timeout = pkt->pkt_time + 2;
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
	} else {
		fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
	}

	if (pkt->pkt_flags & FLAG_NOINTR) {
		fpkt->pkt_comp = NULL;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
	} else {
		fpkt->pkt_comp = fcp_cmd_callback;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
	}

	/*
	 * Lastly, we need initialize scsi_pkt
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		return (fcp_dopoll(pptr, cmd));
	}

	cmd->cmd_state = FCP_PKT_ISSUED;
	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
	if (rval == FC_SUCCESS) {
		return (TRAN_ACCEPT);
	}

	/*
	 * Need more consideration
	 *
	 * pkt->pkt_flags & FLAG_NOQUEUE could abort other pkt
	 */
	cmd->cmd_state = FCP_PKT_IDLE;
	if (rval == FC_TRAN_BUSY) {
		return (TRAN_BUSY);
	} else {
		return (TRAN_FATAL_ERROR);
	}
}
16343 16341
/*
 * scsi_poll will always call tran_sync_pkt for pseudo FC-HBAs
 * SCSA will initialize it to scsi_sync_cache_pkt for physical FC-HBAs
 */
static void
fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	/* Intentionally a no-op apart from tracing the call. */
	FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
}
16354 16352
/*
 * scsi_dmafree will always call tran_dmafree, when STATE_ARQ_DONE
 */
static void
fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	/* Intentionally a no-op apart from tracing the call. */
	FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
}
↓ open down ↓ |
4137 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX