4786 emlxs shouldn't abuse ddi_get_time(9f)
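The single functional change in this webrev is in _init() (old line 1100 / new line 1101 below): the driver's load timestamp is now taken with gethrtime(9F) instead of ddi_get_time(9F). The sketch that follows is an illustrative note on why that substitution is the right one, not part of the webrev itself; record_load_time(), seconds_since_load(), and the standalone drv_timestamp variable are hypothetical stand-ins that only mirror the emlxs_device field touched by the diff. ddi_get_time(9F) returns wall-clock seconds and can jump forward or backward when the system clock is adjusted, while gethrtime(9F) is a monotonic high-resolution counter, which is what elapsed-time bookkeeping inside a driver actually needs.

	/*
	 * Illustrative sketch only (not emlxs source): record a driver
	 * load time with gethrtime(9F) and derive elapsed seconds from it.
	 * Helper names and the standalone variable are hypothetical.
	 */
	#include <sys/types.h>
	#include <sys/time.h>		/* gethrtime(), hrtime_t, NANOSEC */

	static hrtime_t drv_timestamp;	/* mirrors emlxs_device.drv_timestamp */

	static void
	record_load_time(void)
	{
		/*
		 * Monotonic and high resolution; never stepped by date(1)
		 * or NTP, unlike the wall-clock seconds from ddi_get_time(9F).
		 */
		drv_timestamp = gethrtime();
	}

	static uint64_t
	seconds_since_load(void)
	{
		/* Elapsed time is a plain subtraction and cannot go backward. */
		return ((uint64_t)(gethrtime() - drv_timestamp) / NANOSEC);
	}
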
--- old/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_solaris.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_solaris.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Emulex. All rights reserved.
24 24 * Use is subject to license terms.
25 25 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
26 + * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
26 27 */
27 28
28 29
29 30 #define DEF_ICFG 1
30 31
31 32 #include <emlxs.h>
32 33 #include <emlxs_version.h>
33 34
34 35
35 36 char emlxs_revision[] = EMLXS_REVISION;
36 37 char emlxs_version[] = EMLXS_VERSION;
37 38 char emlxs_name[] = EMLXS_NAME;
38 39 char emlxs_label[] = EMLXS_LABEL;
39 40
40 41 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
41 42 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
42 43
43 44 #ifdef MENLO_SUPPORT
44 45 static int32_t emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp);
45 46 #endif /* MENLO_SUPPORT */
46 47
47 48 static void emlxs_fca_attach(emlxs_hba_t *hba);
48 49 static void emlxs_fca_detach(emlxs_hba_t *hba);
49 50 static void emlxs_drv_banner(emlxs_hba_t *hba);
50 51
51 52 static int32_t emlxs_get_props(emlxs_hba_t *hba);
52 53 static int32_t emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp,
53 54 uint32_t *pkt_flags);
54 55 static int32_t emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp);
55 56 static int32_t emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp);
56 57 static int32_t emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
57 58 static int32_t emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
58 59 static int32_t emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
59 60 static int32_t emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
60 61 static int32_t emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
61 62 static uint32_t emlxs_add_instance(int32_t ddiinst);
62 63 static void emlxs_iodone(emlxs_buf_t *sbp);
63 64 static int emlxs_pm_lower_power(dev_info_t *dip);
64 65 static int emlxs_pm_raise_power(dev_info_t *dip);
65 66 static void emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
66 67 uint32_t failed);
67 68 static void emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
68 69 static uint32_t emlxs_integrity_check(emlxs_hba_t *hba);
69 70 static uint32_t emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
70 71 uint32_t args, uint32_t *arg);
71 72
72 73 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
73 74 static void emlxs_read_vport_prop(emlxs_hba_t *hba);
74 75 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */
75 76
76 77
77 78
78 79 extern int
79 80 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id);
80 81 extern int
81 82 emlxs_select_msiid(emlxs_hba_t *hba);
82 83
83 84 /*
84 85 * Driver Entry Routines.
85 86 */
86 87 static int32_t emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
87 88 static int32_t emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
88 89 static int32_t emlxs_open(dev_t *, int32_t, int32_t, cred_t *);
89 90 static int32_t emlxs_close(dev_t, int32_t, int32_t, cred_t *);
90 91 static int32_t emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t,
91 92 cred_t *, int32_t *);
92 93 static int32_t emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
93 94
94 95
95 96 /*
96 97 * FC_AL Transport Functions.
97 98 */
98 99 static opaque_t emlxs_fca_bind_port(dev_info_t *, fc_fca_port_info_t *,
99 100 fc_fca_bind_info_t *);
100 101 static void emlxs_fca_unbind_port(opaque_t);
101 102 static void emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
102 103 static int32_t emlxs_fca_get_cap(opaque_t, char *, void *);
103 104 static int32_t emlxs_fca_set_cap(opaque_t, char *, void *);
104 105 static int32_t emlxs_fca_get_map(opaque_t, fc_lilpmap_t *);
105 106 static int32_t emlxs_fca_ub_alloc(opaque_t, uint64_t *, uint32_t,
106 107 uint32_t *, uint32_t);
107 108 static int32_t emlxs_fca_ub_free(opaque_t, uint32_t, uint64_t *);
108 109
109 110 static opaque_t emlxs_fca_get_device(opaque_t, fc_portid_t);
110 111 static int32_t emlxs_fca_notify(opaque_t, uint32_t);
111 112 static void emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);
112 113
113 114 /*
114 115 * Driver Internal Functions.
115 116 */
116 117
117 118 static void emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
118 119 static int32_t emlxs_power(dev_info_t *, int32_t, int32_t);
119 120 #ifdef EMLXS_I386
120 121 #ifdef S11
121 122 static int32_t emlxs_quiesce(dev_info_t *);
122 123 #endif
123 124 #endif
124 125 static int32_t emlxs_hba_resume(dev_info_t *);
125 126 static int32_t emlxs_hba_suspend(dev_info_t *);
126 127 static int32_t emlxs_hba_detach(dev_info_t *);
127 128 static int32_t emlxs_hba_attach(dev_info_t *);
128 129 static void emlxs_lock_destroy(emlxs_hba_t *);
129 130 static void emlxs_lock_init(emlxs_hba_t *);
130 131
131 132 char *emlxs_pm_components[] = {
132 133 "NAME=emlxx000",
133 134 "0=Device D3 State",
134 135 "1=Device D0 State"
135 136 };
136 137
137 138
138 139 /*
139 140 * Default emlx dma limits
140 141 */
141 142 ddi_dma_lim_t emlxs_dma_lim = {
142 143 (uint32_t)0, /* dlim_addr_lo */
143 144 (uint32_t)0xffffffff, /* dlim_addr_hi */
144 145 (uint_t)0x00ffffff, /* dlim_cntr_max */
145 146 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dlim_burstsizes */
146 147 1, /* dlim_minxfer */
147 148 0x00ffffff /* dlim_dmaspeed */
148 149 };
149 150
150 151 /*
151 152 * Be careful when using these attributes; the defaults listed below are
152 153 * (almost) the most general case, permitting allocation in almost any
153 154 * way supported by the LightPulse family. The sole exception is the
154 155 * alignment specified as requiring memory allocation on a 4-byte boundary;
155 156 * the Lightpulse can DMA memory on any byte boundary.
156 157 *
157 158 * The LightPulse family currently is limited to 16M transfers;
158 159 * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
159 160 */
160 161 ddi_dma_attr_t emlxs_dma_attr = {
161 162 DMA_ATTR_V0, /* dma_attr_version */
162 163 (uint64_t)0, /* dma_attr_addr_lo */
163 164 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
164 165 (uint64_t)0x00ffffff, /* dma_attr_count_max */
165 166 1, /* dma_attr_align */
166 167 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
167 168 1, /* dma_attr_minxfer */
168 169 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
169 170 (uint64_t)0xffffffff, /* dma_attr_seg */
170 171 EMLXS_SGLLEN, /* dma_attr_sgllen */
171 172 1, /* dma_attr_granular */
172 173 0 /* dma_attr_flags */
173 174 };
174 175
175 176 ddi_dma_attr_t emlxs_dma_attr_ro = {
176 177 DMA_ATTR_V0, /* dma_attr_version */
177 178 (uint64_t)0, /* dma_attr_addr_lo */
178 179 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
179 180 (uint64_t)0x00ffffff, /* dma_attr_count_max */
180 181 1, /* dma_attr_align */
181 182 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
182 183 1, /* dma_attr_minxfer */
183 184 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
184 185 (uint64_t)0xffffffff, /* dma_attr_seg */
185 186 EMLXS_SGLLEN, /* dma_attr_sgllen */
186 187 1, /* dma_attr_granular */
187 188 DDI_DMA_RELAXED_ORDERING /* dma_attr_flags */
188 189 };
189 190
190 191 ddi_dma_attr_t emlxs_dma_attr_1sg = {
191 192 DMA_ATTR_V0, /* dma_attr_version */
192 193 (uint64_t)0, /* dma_attr_addr_lo */
193 194 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
194 195 (uint64_t)0x00ffffff, /* dma_attr_count_max */
195 196 1, /* dma_attr_align */
196 197 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
197 198 1, /* dma_attr_minxfer */
198 199 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
199 200 (uint64_t)0xffffffff, /* dma_attr_seg */
200 201 1, /* dma_attr_sgllen */
201 202 1, /* dma_attr_granular */
202 203 0 /* dma_attr_flags */
203 204 };
204 205
205 206 #if (EMLXS_MODREV >= EMLXS_MODREV3)
206 207 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
207 208 DMA_ATTR_V0, /* dma_attr_version */
208 209 (uint64_t)0, /* dma_attr_addr_lo */
209 210 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
210 211 (uint64_t)0x00ffffff, /* dma_attr_count_max */
211 212 1, /* dma_attr_align */
212 213 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
213 214 1, /* dma_attr_minxfer */
214 215 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
215 216 (uint64_t)0xffffffff, /* dma_attr_seg */
216 217 EMLXS_SGLLEN, /* dma_attr_sgllen */
217 218 1, /* dma_attr_granular */
218 219 0 /* dma_attr_flags */
219 220 };
220 221 #endif /* >= EMLXS_MODREV3 */
221 222
222 223 /*
223 224 * DDI access attributes for device
224 225 */
225 226 ddi_device_acc_attr_t emlxs_dev_acc_attr = {
226 227 DDI_DEVICE_ATTR_V1, /* devacc_attr_version */
227 228 DDI_STRUCTURE_LE_ACC, /* PCI is Little Endian */
228 229 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */
229 230 DDI_DEFAULT_ACC /* devacc_attr_access */
230 231 };
231 232
232 233 /*
233 234 * DDI access attributes for data
234 235 */
235 236 ddi_device_acc_attr_t emlxs_data_acc_attr = {
236 237 DDI_DEVICE_ATTR_V1, /* devacc_attr_version */
237 238 DDI_NEVERSWAP_ACC, /* don't swap for Data */
238 239 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */
239 240 DDI_DEFAULT_ACC /* devacc_attr_access */
240 241 };
241 242
242 243 /*
243 244 * Fill in the FC Transport structure,
244 245 * as defined in the Fibre Channel Transport Programmming Guide.
245 246 */
246 247 #if (EMLXS_MODREV == EMLXS_MODREV5)
247 248 static fc_fca_tran_t emlxs_fca_tran = {
248 249 FCTL_FCA_MODREV_5, /* fca_version, with SUN NPIV support */
249 250 MAX_VPORTS, /* fca numerb of ports */
250 251 sizeof (emlxs_buf_t), /* fca pkt size */
251 252 2048, /* fca cmd max */
252 253 &emlxs_dma_lim, /* fca dma limits */
253 254 0, /* fca iblock, to be filled in later */
254 255 &emlxs_dma_attr, /* fca dma attributes */
255 256 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */
256 257 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */
257 258 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */
258 259 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */
259 260 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */
260 261 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */
261 262 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */
262 263 &emlxs_data_acc_attr, /* fca access atributes */
263 264 0, /* fca_num_npivports */
264 265 {0, 0, 0, 0, 0, 0, 0, 0}, /* Physical port WWPN */
265 266 emlxs_fca_bind_port,
266 267 emlxs_fca_unbind_port,
267 268 emlxs_fca_pkt_init,
268 269 emlxs_fca_pkt_uninit,
269 270 emlxs_fca_transport,
270 271 emlxs_fca_get_cap,
271 272 emlxs_fca_set_cap,
272 273 emlxs_fca_get_map,
273 274 emlxs_fca_transport,
274 275 emlxs_fca_ub_alloc,
275 276 emlxs_fca_ub_free,
276 277 emlxs_fca_ub_release,
277 278 emlxs_fca_pkt_abort,
278 279 emlxs_fca_reset,
279 280 emlxs_fca_port_manage,
280 281 emlxs_fca_get_device,
281 282 emlxs_fca_notify
282 283 };
283 284 #endif /* EMLXS_MODREV5 */
284 285
285 286
286 287 #if (EMLXS_MODREV == EMLXS_MODREV4)
287 288 static fc_fca_tran_t emlxs_fca_tran = {
288 289 FCTL_FCA_MODREV_4, /* fca_version */
289 290 MAX_VPORTS, /* fca numerb of ports */
290 291 sizeof (emlxs_buf_t), /* fca pkt size */
291 292 2048, /* fca cmd max */
292 293 &emlxs_dma_lim, /* fca dma limits */
293 294 0, /* fca iblock, to be filled in later */
294 295 &emlxs_dma_attr, /* fca dma attributes */
295 296 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */
296 297 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */
297 298 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */
298 299 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */
299 300 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */
300 301 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */
301 302 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */
302 303 &emlxs_data_acc_attr, /* fca access atributes */
303 304 emlxs_fca_bind_port,
304 305 emlxs_fca_unbind_port,
305 306 emlxs_fca_pkt_init,
306 307 emlxs_fca_pkt_uninit,
307 308 emlxs_fca_transport,
308 309 emlxs_fca_get_cap,
309 310 emlxs_fca_set_cap,
310 311 emlxs_fca_get_map,
311 312 emlxs_fca_transport,
312 313 emlxs_fca_ub_alloc,
313 314 emlxs_fca_ub_free,
314 315 emlxs_fca_ub_release,
315 316 emlxs_fca_pkt_abort,
316 317 emlxs_fca_reset,
317 318 emlxs_fca_port_manage,
318 319 emlxs_fca_get_device,
319 320 emlxs_fca_notify
320 321 };
321 322 #endif /* EMLXS_MODEREV4 */
322 323
323 324
324 325 #if (EMLXS_MODREV == EMLXS_MODREV3)
325 326 static fc_fca_tran_t emlxs_fca_tran = {
326 327 FCTL_FCA_MODREV_3, /* fca_version */
327 328 MAX_VPORTS, /* fca numerb of ports */
328 329 sizeof (emlxs_buf_t), /* fca pkt size */
329 330 2048, /* fca cmd max */
330 331 &emlxs_dma_lim, /* fca dma limits */
331 332 0, /* fca iblock, to be filled in later */
332 333 &emlxs_dma_attr, /* fca dma attributes */
333 334 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */
334 335 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */
335 336 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */
336 337 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */
337 338 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */
338 339 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */
339 340 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */
340 341 &emlxs_data_acc_attr, /* fca access atributes */
341 342 emlxs_fca_bind_port,
342 343 emlxs_fca_unbind_port,
343 344 emlxs_fca_pkt_init,
344 345 emlxs_fca_pkt_uninit,
345 346 emlxs_fca_transport,
346 347 emlxs_fca_get_cap,
347 348 emlxs_fca_set_cap,
348 349 emlxs_fca_get_map,
349 350 emlxs_fca_transport,
350 351 emlxs_fca_ub_alloc,
351 352 emlxs_fca_ub_free,
352 353 emlxs_fca_ub_release,
353 354 emlxs_fca_pkt_abort,
354 355 emlxs_fca_reset,
355 356 emlxs_fca_port_manage,
356 357 emlxs_fca_get_device,
357 358 emlxs_fca_notify
358 359 };
359 360 #endif /* EMLXS_MODREV3 */
360 361
361 362
362 363 #if (EMLXS_MODREV == EMLXS_MODREV2)
363 364 static fc_fca_tran_t emlxs_fca_tran = {
364 365 FCTL_FCA_MODREV_2, /* fca_version */
365 366 MAX_VPORTS, /* number of ports */
366 367 sizeof (emlxs_buf_t), /* pkt size */
367 368 2048, /* max cmds */
368 369 &emlxs_dma_lim, /* DMA limits */
369 370 0, /* iblock, to be filled in later */
370 371 &emlxs_dma_attr, /* dma attributes */
371 372 &emlxs_data_acc_attr, /* access atributes */
372 373 emlxs_fca_bind_port,
373 374 emlxs_fca_unbind_port,
374 375 emlxs_fca_pkt_init,
375 376 emlxs_fca_pkt_uninit,
376 377 emlxs_fca_transport,
377 378 emlxs_fca_get_cap,
378 379 emlxs_fca_set_cap,
379 380 emlxs_fca_get_map,
380 381 emlxs_fca_transport,
381 382 emlxs_fca_ub_alloc,
382 383 emlxs_fca_ub_free,
383 384 emlxs_fca_ub_release,
384 385 emlxs_fca_pkt_abort,
385 386 emlxs_fca_reset,
386 387 emlxs_fca_port_manage,
387 388 emlxs_fca_get_device,
388 389 emlxs_fca_notify
389 390 };
390 391 #endif /* EMLXS_MODREV2 */
391 392
392 393 /*
393 394 * state pointer which the implementation uses as a place to
394 395 * hang a set of per-driver structures;
395 396 *
396 397 */
397 398 void *emlxs_soft_state = NULL;
398 399
399 400 /*
400 401 * Driver Global variables.
401 402 */
402 403 int32_t emlxs_scsi_reset_delay = 3000; /* milliseconds */
403 404
404 405 emlxs_device_t emlxs_device;
405 406
406 407 uint32_t emlxs_instance[MAX_FC_BRDS]; /* uses emlxs_device.lock */
407 408 uint32_t emlxs_instance_count = 0; /* uses emlxs_device.lock */
408 409 uint32_t emlxs_instance_flag = 0; /* uses emlxs_device.lock */
409 410 #define EMLXS_FW_SHOW 0x00000001
410 411
411 412
412 413 /*
413 414 * Single private "global" lock used to gain access to
414 415 * the hba_list and/or any other case where we want need to be
415 416 * single-threaded.
416 417 */
417 418 uint32_t emlxs_diag_state;
418 419
419 420 /*
420 421 * CB ops vector. Used for administration only.
421 422 */
422 423 static struct cb_ops emlxs_cb_ops = {
423 424 emlxs_open, /* cb_open */
424 425 emlxs_close, /* cb_close */
425 426 nodev, /* cb_strategy */
426 427 nodev, /* cb_print */
427 428 nodev, /* cb_dump */
428 429 nodev, /* cb_read */
429 430 nodev, /* cb_write */
430 431 emlxs_ioctl, /* cb_ioctl */
431 432 nodev, /* cb_devmap */
432 433 nodev, /* cb_mmap */
433 434 nodev, /* cb_segmap */
434 435 nochpoll, /* cb_chpoll */
435 436 ddi_prop_op, /* cb_prop_op */
436 437 0, /* cb_stream */
437 438 #ifdef _LP64
438 439 D_64BIT | D_HOTPLUG | D_MP | D_NEW, /* cb_flag */
439 440 #else
440 441 D_HOTPLUG | D_MP | D_NEW, /* cb_flag */
441 442 #endif
442 443 CB_REV, /* rev */
443 444 nodev, /* cb_aread */
444 445 nodev /* cb_awrite */
445 446 };
446 447
447 448 static struct dev_ops emlxs_ops = {
448 449 DEVO_REV, /* rev */
449 450 0, /* refcnt */
450 451 emlxs_info, /* getinfo */
451 452 nulldev, /* identify */
452 453 nulldev, /* probe */
453 454 emlxs_attach, /* attach */
454 455 emlxs_detach, /* detach */
455 456 nodev, /* reset */
456 457 &emlxs_cb_ops, /* devo_cb_ops */
457 458 NULL, /* devo_bus_ops */
458 459 emlxs_power, /* power ops */
459 460 #ifdef EMLXS_I386
460 461 #ifdef S11
461 462 emlxs_quiesce, /* quiesce */
462 463 #endif
463 464 #endif
464 465 };
465 466
466 467 #include <sys/modctl.h>
467 468 extern struct mod_ops mod_driverops;
468 469
469 470 #ifdef SAN_DIAG_SUPPORT
470 471 extern kmutex_t sd_bucket_mutex;
471 472 extern sd_bucket_info_t sd_bucket;
472 473 #endif /* SAN_DIAG_SUPPORT */
473 474
474 475 /*
475 476 * Module linkage information for the kernel.
476 477 */
477 478 static struct modldrv emlxs_modldrv = {
478 479 &mod_driverops, /* module type - driver */
479 480 emlxs_name, /* module name */
480 481 &emlxs_ops, /* driver ops */
481 482 };
482 483
483 484
484 485 /*
485 486 * Driver module linkage structure
486 487 */
487 488 static struct modlinkage emlxs_modlinkage = {
488 489 MODREV_1, /* ml_rev - must be MODREV_1 */
489 490 &emlxs_modldrv, /* ml_linkage */
490 491 NULL /* end of driver linkage */
491 492 };
492 493
493 494
494 495 /* We only need to add entries for non-default return codes. */
495 496 /* Entries do not need to be in order. */
496 497 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
497 498 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE */
498 499
499 500 emlxs_xlat_err_t emlxs_iostat_tbl[] = {
500 501 /* {f/w code, pkt_state, pkt_reason, */
501 502 /* pkt_expln, pkt_action} */
502 503
503 504 /* 0x00 - Do not remove */
504 505 {IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
505 506 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
506 507
507 508 /* 0x01 - Do not remove */
508 509 {IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
509 510 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
510 511
511 512 /* 0x02 */
512 513 {IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
513 514 FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},
514 515
515 516 /*
516 517 * This is a default entry.
517 518 * The real codes are written dynamically in emlxs_els.c
518 519 */
519 520 /* 0x09 */
520 521 {IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
521 522 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
522 523
523 524 /* Special error code */
524 525 /* 0x10 */
525 526 {IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
526 527 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
527 528
528 529 /* Special error code */
529 530 /* 0x11 */
530 531 {IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
531 532 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
532 533
533 534 /* CLASS 2 only */
534 535 /* 0x04 */
535 536 {IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
536 537 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
537 538
538 539 /* CLASS 2 only */
539 540 /* 0x05 */
540 541 {IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
541 542 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
542 543
543 544 /* CLASS 2 only */
544 545 /* 0x06 */
545 546 {IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
546 547 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
547 548
548 549 /* CLASS 2 only */
549 550 /* 0x07 */
550 551 {IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
551 552 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
552 553 };
553 554
554 555 #define IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
555 556
556 557
557 558 /* We only need to add entries for non-default return codes. */
558 559 /* Entries do not need to be in order. */
559 560 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
560 561 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE} */
561 562
562 563 emlxs_xlat_err_t emlxs_ioerr_tbl[] = {
563 564 /* {f/w code, pkt_state, pkt_reason, */
564 565 /* pkt_expln, pkt_action} */
565 566
566 567 /* 0x01 */
567 568 {IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
568 569 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
569 570
570 571 /* 0x02 */
571 572 {IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
572 573 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
573 574
574 575 /* 0x04 */
575 576 {IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
576 577 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
577 578
578 579 /* 0x05 */
579 580 {IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
580 581 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
581 582
582 583 /* 0x06 */
583 584 {IOERR_ILLEGAL_COMMAND, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
584 585 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
585 586
586 587 /* 0x07 */
587 588 {IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
588 589 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
589 590
590 591 /* 0x08 */
591 592 {IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
592 593 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
593 594
594 595 /* 0x0B */
595 596 {IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
596 597 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
597 598
598 599 /* 0x0D */
599 600 {IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
600 601 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
601 602
602 603 /* 0x0E */
603 604 {IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
604 605 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
605 606
606 607 /* 0x0F */
607 608 {IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_FRAME,
608 609 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
609 610
610 611 /* 0x11 */
611 612 {IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
612 613 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
613 614
614 615 /* 0x13 */
615 616 {IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
616 617 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
617 618
618 619 /* 0x14 */
619 620 {IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
620 621 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
621 622
622 623 /* 0x15 */
623 624 {IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
624 625 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
625 626
626 627 /* 0x16 */
627 628 {IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
628 629 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
629 630
630 631 /* 0x17 */
631 632 {IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
632 633 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
633 634
634 635 /* 0x18 */
635 636 {IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
636 637 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
637 638
638 639 /* 0x1A */
639 640 {IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
640 641 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
641 642
642 643 /* 0x21 */
643 644 {IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
644 645 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
645 646
646 647 /* Occurs at link down */
647 648 /* 0x28 */
648 649 {IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
649 650 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
650 651
651 652 /* 0xF0 */
652 653 {IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
653 654 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
654 655 };
655 656
656 657 #define IOERR_MAX (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
657 658
658 659
659 660
660 661 emlxs_table_t emlxs_error_table[] = {
661 662 {IOERR_SUCCESS, "No error."},
662 663 {IOERR_MISSING_CONTINUE, "Missing continue."},
663 664 {IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
664 665 {IOERR_INTERNAL_ERROR, "Internal error."},
665 666 {IOERR_INVALID_RPI, "Invalid RPI."},
666 667 {IOERR_NO_XRI, "No XRI."},
667 668 {IOERR_ILLEGAL_COMMAND, "Illegal command."},
668 669 {IOERR_XCHG_DROPPED, "Exchange dropped."},
669 670 {IOERR_ILLEGAL_FIELD, "Illegal field."},
670 671 {IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
671 672 {IOERR_TX_DMA_FAILED, "TX DMA failed."},
672 673 {IOERR_RX_DMA_FAILED, "RX DMA failed."},
673 674 {IOERR_ILLEGAL_FRAME, "Illegal frame."},
674 675 {IOERR_NO_RESOURCES, "No resources."},
675 676 {IOERR_ILLEGAL_LENGTH, "Illegal length."},
676 677 {IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
677 678 {IOERR_ABORT_IN_PROGRESS, "Abort in progess."},
678 679 {IOERR_ABORT_REQUESTED, "Abort requested."},
679 680 {IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
680 681 {IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
681 682 {IOERR_RING_RESET, "Ring reset."},
682 683 {IOERR_LINK_DOWN, "Link down."},
683 684 {IOERR_CORRUPTED_DATA, "Corrupted data."},
684 685 {IOERR_CORRUPTED_RPI, "Corrupted RPI."},
685 686 {IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
686 687 {IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
687 688 {IOERR_DUP_FRAME, "Duplicate frame."},
688 689 {IOERR_LINK_CONTROL_FRAME, "Link control frame."},
689 690 {IOERR_BAD_HOST_ADDRESS, "Bad host address."},
690 691 {IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
691 692 {IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
692 693 {IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
693 694 {IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
694 695 {IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
695 696 {IOERR_XRIBUF_WAITING, "XRI buffer shortage"},
696 697 {IOERR_XRIBUF_MISSING, "XRI buffer missing"},
697 698 {IOERR_ROFFSET_INVAL, "Relative offset invalid."},
698 699 {IOERR_ROFFSET_MISSING, "Relative offset missing."},
699 700 {IOERR_INSUF_BUFFER, "Buffer too small."},
700 701 {IOERR_MISSING_SI, "ELS frame missing SI"},
701 702 {IOERR_MISSING_ES, "Exhausted burst without ES"},
702 703 {IOERR_INCOMP_XFER, "Transfer incomplete."},
703 704 {IOERR_ABORT_TIMEOUT, "Abort timeout."}
704 705
705 706 }; /* emlxs_error_table */
706 707
707 708
708 709 emlxs_table_t emlxs_state_table[] = {
709 710 {IOSTAT_SUCCESS, "Success."},
710 711 {IOSTAT_FCP_RSP_ERROR, "FCP response error."},
711 712 {IOSTAT_REMOTE_STOP, "Remote stop."},
712 713 {IOSTAT_LOCAL_REJECT, "Local reject."},
713 714 {IOSTAT_NPORT_RJT, "NPort reject."},
714 715 {IOSTAT_FABRIC_RJT, "Fabric reject."},
715 716 {IOSTAT_NPORT_BSY, "Nport busy."},
716 717 {IOSTAT_FABRIC_BSY, "Fabric busy."},
717 718 {IOSTAT_INTERMED_RSP, "Intermediate response."},
718 719 {IOSTAT_LS_RJT, "LS reject."},
719 720 {IOSTAT_CMD_REJECT, "Cmd reject."},
720 721 {IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
721 722 {IOSTAT_NEED_BUFF_ENTRY, "Need buffer entry."},
722 723 {IOSTAT_DATA_UNDERRUN, "Data underrun."},
723 724 {IOSTAT_DATA_OVERRUN, "Data overrun."},
724 725
725 726 }; /* emlxs_state_table */
726 727
727 728
728 729 #ifdef MENLO_SUPPORT
729 730 emlxs_table_t emlxs_menlo_cmd_table[] = {
730 731 {MENLO_CMD_INITIALIZE, "MENLO_INIT"},
731 732 {MENLO_CMD_FW_DOWNLOAD, "MENLO_FW_DOWNLOAD"},
732 733 {MENLO_CMD_READ_MEMORY, "MENLO_READ_MEM"},
733 734 {MENLO_CMD_WRITE_MEMORY, "MENLO_WRITE_MEM"},
734 735 {MENLO_CMD_FTE_INSERT, "MENLO_FTE_INSERT"},
735 736 {MENLO_CMD_FTE_DELETE, "MENLO_FTE_DELETE"},
736 737
737 738 {MENLO_CMD_GET_INIT, "MENLO_GET_INIT"},
738 739 {MENLO_CMD_GET_CONFIG, "MENLO_GET_CONFIG"},
739 740 {MENLO_CMD_GET_PORT_STATS, "MENLO_GET_PORT_STATS"},
740 741 {MENLO_CMD_GET_LIF_STATS, "MENLO_GET_LIF_STATS"},
741 742 {MENLO_CMD_GET_ASIC_STATS, "MENLO_GET_ASIC_STATS"},
742 743 {MENLO_CMD_GET_LOG_CONFIG, "MENLO_GET_LOG_CFG"},
743 744 {MENLO_CMD_GET_LOG_DATA, "MENLO_GET_LOG_DATA"},
744 745 {MENLO_CMD_GET_PANIC_LOG, "MENLO_GET_PANIC_LOG"},
745 746 {MENLO_CMD_GET_LB_MODE, "MENLO_GET_LB_MODE"},
746 747
747 748 {MENLO_CMD_SET_PAUSE, "MENLO_SET_PAUSE"},
748 749 {MENLO_CMD_SET_FCOE_COS, "MENLO_SET_FCOE_COS"},
749 750 {MENLO_CMD_SET_UIF_PORT_TYPE, "MENLO_SET_UIF_TYPE"},
750 751
751 752 {MENLO_CMD_DIAGNOSTICS, "MENLO_DIAGNOSTICS"},
752 753 {MENLO_CMD_LOOPBACK, "MENLO_LOOPBACK"},
753 754
754 755 {MENLO_CMD_RESET, "MENLO_RESET"},
755 756 {MENLO_CMD_SET_MODE, "MENLO_SET_MODE"}
756 757
757 758 }; /* emlxs_menlo_cmd_table */
758 759
759 760 emlxs_table_t emlxs_menlo_rsp_table[] = {
760 761 {MENLO_RSP_SUCCESS, "SUCCESS"},
761 762 {MENLO_ERR_FAILED, "FAILED"},
762 763 {MENLO_ERR_INVALID_CMD, "INVALID_CMD"},
763 764 {MENLO_ERR_INVALID_CREDIT, "INVALID_CREDIT"},
764 765 {MENLO_ERR_INVALID_SIZE, "INVALID_SIZE"},
765 766 {MENLO_ERR_INVALID_ADDRESS, "INVALID_ADDRESS"},
766 767 {MENLO_ERR_INVALID_CONTEXT, "INVALID_CONTEXT"},
767 768 {MENLO_ERR_INVALID_LENGTH, "INVALID_LENGTH"},
768 769 {MENLO_ERR_INVALID_TYPE, "INVALID_TYPE"},
769 770 {MENLO_ERR_INVALID_DATA, "INVALID_DATA"},
770 771 {MENLO_ERR_INVALID_VALUE1, "INVALID_VALUE1"},
771 772 {MENLO_ERR_INVALID_VALUE2, "INVALID_VALUE2"},
772 773 {MENLO_ERR_INVALID_MASK, "INVALID_MASK"},
773 774 {MENLO_ERR_CHECKSUM, "CHECKSUM_ERROR"},
774 775 {MENLO_ERR_UNKNOWN_FCID, "UNKNOWN_FCID"},
775 776 {MENLO_ERR_UNKNOWN_WWN, "UNKNOWN_WWN"},
776 777 {MENLO_ERR_BUSY, "BUSY"},
777 778
778 779 }; /* emlxs_menlo_rsp_table */
779 780
780 781 #endif /* MENLO_SUPPORT */
781 782
782 783
783 784 emlxs_table_t emlxs_mscmd_table[] = {
784 785 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
785 786 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
786 787 {MS_GTIN, "MS_GTIN"},
787 788 {MS_GIEL, "MS_GIEL"},
788 789 {MS_GIET, "MS_GIET"},
789 790 {MS_GDID, "MS_GDID"},
790 791 {MS_GMID, "MS_GMID"},
791 792 {MS_GFN, "MS_GFN"},
792 793 {MS_GIELN, "MS_GIELN"},
793 794 {MS_GMAL, "MS_GMAL"},
794 795 {MS_GIEIL, "MS_GIEIL"},
795 796 {MS_GPL, "MS_GPL"},
796 797 {MS_GPT, "MS_GPT"},
797 798 {MS_GPPN, "MS_GPPN"},
798 799 {MS_GAPNL, "MS_GAPNL"},
799 800 {MS_GPS, "MS_GPS"},
800 801 {MS_GPSC, "MS_GPSC"},
801 802 {MS_GATIN, "MS_GATIN"},
802 803 {MS_GSES, "MS_GSES"},
803 804 {MS_GPLNL, "MS_GPLNL"},
804 805 {MS_GPLT, "MS_GPLT"},
805 806 {MS_GPLML, "MS_GPLML"},
806 807 {MS_GPAB, "MS_GPAB"},
807 808 {MS_GNPL, "MS_GNPL"},
808 809 {MS_GPNL, "MS_GPNL"},
809 810 {MS_GPFCP, "MS_GPFCP"},
810 811 {MS_GPLI, "MS_GPLI"},
811 812 {MS_GNID, "MS_GNID"},
812 813 {MS_RIELN, "MS_RIELN"},
813 814 {MS_RPL, "MS_RPL"},
814 815 {MS_RPLN, "MS_RPLN"},
815 816 {MS_RPLT, "MS_RPLT"},
816 817 {MS_RPLM, "MS_RPLM"},
817 818 {MS_RPAB, "MS_RPAB"},
818 819 {MS_RPFCP, "MS_RPFCP"},
819 820 {MS_RPLI, "MS_RPLI"},
820 821 {MS_DPL, "MS_DPL"},
821 822 {MS_DPLN, "MS_DPLN"},
822 823 {MS_DPLM, "MS_DPLM"},
823 824 {MS_DPLML, "MS_DPLML"},
824 825 {MS_DPLI, "MS_DPLI"},
825 826 {MS_DPAB, "MS_DPAB"},
826 827 {MS_DPALL, "MS_DPALL"}
827 828
828 829 }; /* emlxs_mscmd_table */
829 830
830 831
831 832 emlxs_table_t emlxs_ctcmd_table[] = {
832 833 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
833 834 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
834 835 {SLI_CTNS_GA_NXT, "GA_NXT"},
835 836 {SLI_CTNS_GPN_ID, "GPN_ID"},
836 837 {SLI_CTNS_GNN_ID, "GNN_ID"},
837 838 {SLI_CTNS_GCS_ID, "GCS_ID"},
838 839 {SLI_CTNS_GFT_ID, "GFT_ID"},
839 840 {SLI_CTNS_GSPN_ID, "GSPN_ID"},
840 841 {SLI_CTNS_GPT_ID, "GPT_ID"},
841 842 {SLI_CTNS_GID_PN, "GID_PN"},
842 843 {SLI_CTNS_GID_NN, "GID_NN"},
843 844 {SLI_CTNS_GIP_NN, "GIP_NN"},
844 845 {SLI_CTNS_GIPA_NN, "GIPA_NN"},
845 846 {SLI_CTNS_GSNN_NN, "GSNN_NN"},
846 847 {SLI_CTNS_GNN_IP, "GNN_IP"},
847 848 {SLI_CTNS_GIPA_IP, "GIPA_IP"},
848 849 {SLI_CTNS_GID_FT, "GID_FT"},
849 850 {SLI_CTNS_GID_PT, "GID_PT"},
850 851 {SLI_CTNS_RPN_ID, "RPN_ID"},
851 852 {SLI_CTNS_RNN_ID, "RNN_ID"},
852 853 {SLI_CTNS_RCS_ID, "RCS_ID"},
853 854 {SLI_CTNS_RFT_ID, "RFT_ID"},
854 855 {SLI_CTNS_RSPN_ID, "RSPN_ID"},
855 856 {SLI_CTNS_RPT_ID, "RPT_ID"},
856 857 {SLI_CTNS_RIP_NN, "RIP_NN"},
857 858 {SLI_CTNS_RIPA_NN, "RIPA_NN"},
858 859 {SLI_CTNS_RSNN_NN, "RSNN_NN"},
859 860 {SLI_CTNS_DA_ID, "DA_ID"},
860 861 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
861 862
862 863 }; /* emlxs_ctcmd_table */
863 864
864 865
865 866
866 867 emlxs_table_t emlxs_rmcmd_table[] = {
867 868 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
868 869 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
869 870 {CT_OP_GSAT, "RM_GSAT"},
870 871 {CT_OP_GHAT, "RM_GHAT"},
871 872 {CT_OP_GPAT, "RM_GPAT"},
872 873 {CT_OP_GDAT, "RM_GDAT"},
873 874 {CT_OP_GPST, "RM_GPST"},
874 875 {CT_OP_GDP, "RM_GDP"},
875 876 {CT_OP_GDPG, "RM_GDPG"},
876 877 {CT_OP_GEPS, "RM_GEPS"},
877 878 {CT_OP_GLAT, "RM_GLAT"},
878 879 {CT_OP_SSAT, "RM_SSAT"},
879 880 {CT_OP_SHAT, "RM_SHAT"},
880 881 {CT_OP_SPAT, "RM_SPAT"},
881 882 {CT_OP_SDAT, "RM_SDAT"},
882 883 {CT_OP_SDP, "RM_SDP"},
883 884 {CT_OP_SBBS, "RM_SBBS"},
884 885 {CT_OP_RPST, "RM_RPST"},
885 886 {CT_OP_VFW, "RM_VFW"},
886 887 {CT_OP_DFW, "RM_DFW"},
887 888 {CT_OP_RES, "RM_RES"},
888 889 {CT_OP_RHD, "RM_RHD"},
889 890 {CT_OP_UFW, "RM_UFW"},
890 891 {CT_OP_RDP, "RM_RDP"},
891 892 {CT_OP_GHDR, "RM_GHDR"},
892 893 {CT_OP_CHD, "RM_CHD"},
893 894 {CT_OP_SSR, "RM_SSR"},
894 895 {CT_OP_RSAT, "RM_RSAT"},
895 896 {CT_OP_WSAT, "RM_WSAT"},
896 897 {CT_OP_RSAH, "RM_RSAH"},
897 898 {CT_OP_WSAH, "RM_WSAH"},
898 899 {CT_OP_RACT, "RM_RACT"},
899 900 {CT_OP_WACT, "RM_WACT"},
900 901 {CT_OP_RKT, "RM_RKT"},
901 902 {CT_OP_WKT, "RM_WKT"},
902 903 {CT_OP_SSC, "RM_SSC"},
903 904 {CT_OP_QHBA, "RM_QHBA"},
904 905 {CT_OP_GST, "RM_GST"},
905 906 {CT_OP_GFTM, "RM_GFTM"},
906 907 {CT_OP_SRL, "RM_SRL"},
907 908 {CT_OP_SI, "RM_SI"},
908 909 {CT_OP_SRC, "RM_SRC"},
909 910 {CT_OP_GPB, "RM_GPB"},
910 911 {CT_OP_SPB, "RM_SPB"},
911 912 {CT_OP_RPB, "RM_RPB"},
912 913 {CT_OP_RAPB, "RM_RAPB"},
913 914 {CT_OP_GBC, "RM_GBC"},
914 915 {CT_OP_GBS, "RM_GBS"},
915 916 {CT_OP_SBS, "RM_SBS"},
916 917 {CT_OP_GANI, "RM_GANI"},
917 918 {CT_OP_GRV, "RM_GRV"},
918 919 {CT_OP_GAPBS, "RM_GAPBS"},
919 920 {CT_OP_APBC, "RM_APBC"},
920 921 {CT_OP_GDT, "RM_GDT"},
921 922 {CT_OP_GDLMI, "RM_GDLMI"},
922 923 {CT_OP_GANA, "RM_GANA"},
923 924 {CT_OP_GDLV, "RM_GDLV"},
924 925 {CT_OP_GWUP, "RM_GWUP"},
925 926 {CT_OP_GLM, "RM_GLM"},
926 927 {CT_OP_GABS, "RM_GABS"},
927 928 {CT_OP_SABS, "RM_SABS"},
928 929 {CT_OP_RPR, "RM_RPR"},
929 930 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
930 931
931 932 }; /* emlxs_rmcmd_table */
932 933
933 934
934 935 emlxs_table_t emlxs_elscmd_table[] = {
935 936 {ELS_CMD_ACC, "ACC"},
936 937 {ELS_CMD_LS_RJT, "LS_RJT"},
937 938 {ELS_CMD_PLOGI, "PLOGI"},
938 939 {ELS_CMD_FLOGI, "FLOGI"},
939 940 {ELS_CMD_LOGO, "LOGO"},
940 941 {ELS_CMD_ABTX, "ABTX"},
941 942 {ELS_CMD_RCS, "RCS"},
942 943 {ELS_CMD_RES, "RES"},
943 944 {ELS_CMD_RSS, "RSS"},
944 945 {ELS_CMD_RSI, "RSI"},
945 946 {ELS_CMD_ESTS, "ESTS"},
946 947 {ELS_CMD_ESTC, "ESTC"},
947 948 {ELS_CMD_ADVC, "ADVC"},
948 949 {ELS_CMD_RTV, "RTV"},
949 950 {ELS_CMD_RLS, "RLS"},
950 951 {ELS_CMD_ECHO, "ECHO"},
951 952 {ELS_CMD_TEST, "TEST"},
952 953 {ELS_CMD_RRQ, "RRQ"},
953 954 {ELS_CMD_REC, "REC"},
954 955 {ELS_CMD_PRLI, "PRLI"},
955 956 {ELS_CMD_PRLO, "PRLO"},
956 957 {ELS_CMD_SCN, "SCN"},
957 958 {ELS_CMD_TPLS, "TPLS"},
958 959 {ELS_CMD_GPRLO, "GPRLO"},
959 960 {ELS_CMD_GAID, "GAID"},
960 961 {ELS_CMD_FACT, "FACT"},
961 962 {ELS_CMD_FDACT, "FDACT"},
962 963 {ELS_CMD_NACT, "NACT"},
963 964 {ELS_CMD_NDACT, "NDACT"},
964 965 {ELS_CMD_QoSR, "QoSR"},
965 966 {ELS_CMD_RVCS, "RVCS"},
966 967 {ELS_CMD_PDISC, "PDISC"},
967 968 {ELS_CMD_FDISC, "FDISC"},
968 969 {ELS_CMD_ADISC, "ADISC"},
969 970 {ELS_CMD_FARP, "FARP"},
970 971 {ELS_CMD_FARPR, "FARPR"},
971 972 {ELS_CMD_FAN, "FAN"},
972 973 {ELS_CMD_RSCN, "RSCN"},
973 974 {ELS_CMD_SCR, "SCR"},
974 975 {ELS_CMD_LINIT, "LINIT"},
975 976 {ELS_CMD_RNID, "RNID"},
976 977 {ELS_CMD_AUTH, "AUTH"}
977 978
978 979 }; /* emlxs_elscmd_table */
979 980
980 981
981 982 /*
982 983 *
983 984 * Device Driver Entry Routines
984 985 *
985 986 */
986 987
987 988 #ifdef MODSYM_SUPPORT
988 989 static void emlxs_fca_modclose();
989 990 static int emlxs_fca_modopen();
990 991 emlxs_modsym_t emlxs_modsym; /* uses emlxs_device.lock */
991 992
992 993 static int
993 994 emlxs_fca_modopen()
994 995 {
995 996 int err;
996 997
997 998 if (emlxs_modsym.mod_fctl) {
998 999 return (0);
999 1000 }
1000 1001
1001 1002 /* Leadville (fctl) */
1002 1003 err = 0;
1003 1004 emlxs_modsym.mod_fctl =
1004 1005 ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
1005 1006 if (!emlxs_modsym.mod_fctl) {
1006 1007 cmn_err(CE_WARN,
1007 1008 "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
1008 1009 DRIVER_NAME, err);
1009 1010
1010 1011 goto failed;
1011 1012 }
1012 1013
1013 1014 err = 0;
1014 1015 /* Check if the fctl fc_fca_attach is present */
1015 1016 emlxs_modsym.fc_fca_attach =
1016 1017 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
1017 1018 &err);
1018 1019 if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
1019 1020 cmn_err(CE_WARN,
1020 1021 "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1021 1022 goto failed;
1022 1023 }
1023 1024
1024 1025 err = 0;
1025 1026 /* Check if the fctl fc_fca_detach is present */
1026 1027 emlxs_modsym.fc_fca_detach =
1027 1028 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
1028 1029 &err);
1029 1030 if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
1030 1031 cmn_err(CE_WARN,
1031 1032 "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1032 1033 goto failed;
1033 1034 }
1034 1035
1035 1036 err = 0;
1036 1037 /* Check if the fctl fc_fca_init is present */
1037 1038 emlxs_modsym.fc_fca_init =
1038 1039 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1039 1040 if ((void *)emlxs_modsym.fc_fca_init == NULL) {
1040 1041 cmn_err(CE_WARN,
1041 1042 "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1042 1043 goto failed;
1043 1044 }
1044 1045
1045 1046 return (0);
1046 1047
1047 1048 failed:
1048 1049
1049 1050 emlxs_fca_modclose();
1050 1051
1051 1052 return (1);
1052 1053
1053 1054
1054 1055 } /* emlxs_fca_modopen() */
1055 1056
1056 1057
1057 1058 static void
1058 1059 emlxs_fca_modclose()
1059 1060 {
1060 1061 if (emlxs_modsym.mod_fctl) {
1061 1062 (void) ddi_modclose(emlxs_modsym.mod_fctl);
1062 1063 emlxs_modsym.mod_fctl = 0;
1063 1064 }
1064 1065
1065 1066 emlxs_modsym.fc_fca_attach = NULL;
1066 1067 emlxs_modsym.fc_fca_detach = NULL;
1067 1068 emlxs_modsym.fc_fca_init = NULL;
1068 1069
1069 1070 return;
1070 1071
1071 1072 } /* emlxs_fca_modclose() */
1072 1073
1073 1074 #endif /* MODSYM_SUPPORT */
1074 1075
1075 1076
1076 1077
1077 1078 /*
1078 1079 * Global driver initialization, called once when driver is loaded
1079 1080 */
1080 1081 int
1081 1082 _init(void)
1082 1083 {
1083 1084 int ret;
1084 1085 char buf[64];
1085 1086
1086 1087 /*
1087 1088 * First init call for this driver,
1088 1089 * so initialize the emlxs_dev_ctl structure.
1089 1090 */
1090 1091 bzero(&emlxs_device, sizeof (emlxs_device));
1091 1092
1092 1093 #ifdef MODSYM_SUPPORT
1093 1094 bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1094 1095 #endif /* MODSYM_SUPPORT */
1095 1096
1096 1097 (void) sprintf(buf, "%s_device mutex", DRIVER_NAME);
1097 1098 mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL);
1098 1099
1099 1100 (void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1100 - emlxs_device.drv_timestamp = ddi_get_time();
1101 + emlxs_device.drv_timestamp = gethrtime();
1101 1102
1102 1103 for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1103 1104 emlxs_instance[ret] = (uint32_t)-1;
1104 1105 }
1105 1106
1106 1107 /*
1107 1108 * Provide for one ddiinst of the emlxs_dev_ctl structure
1108 1109 * for each possible board in the system.
1109 1110 */
1110 1111 if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1111 1112 sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1112 1113 cmn_err(CE_WARN,
1113 1114 "?%s: _init: ddi_soft_state_init failed. rval=%x",
1114 1115 DRIVER_NAME, ret);
1115 1116
1116 1117 return (ret);
1117 1118 }
1118 1119
1119 1120 #ifdef MODSYM_SUPPORT
1120 1121 /* Open SFS */
1121 1122 (void) emlxs_fca_modopen();
1122 1123 #endif /* MODSYM_SUPPORT */
1123 1124
1124 1125 /* Setup devops for SFS */
1125 1126 MODSYM(fc_fca_init)(&emlxs_ops);
1126 1127
1127 1128 if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1128 1129 (void) ddi_soft_state_fini(&emlxs_soft_state);
1129 1130 #ifdef MODSYM_SUPPORT
1130 1131 /* Close SFS */
1131 1132 emlxs_fca_modclose();
1132 1133 #endif /* MODSYM_SUPPORT */
1133 1134
1134 1135 return (ret);
1135 1136 }
1136 1137
1137 1138 #ifdef SAN_DIAG_SUPPORT
1138 1139 (void) sprintf(buf, "%s_sd_bucket mutex", DRIVER_NAME);
1139 1140 mutex_init(&sd_bucket_mutex, buf, MUTEX_DRIVER, NULL);
1140 1141 #endif /* SAN_DIAG_SUPPORT */
1141 1142
1142 1143 return (ret);
1143 1144
1144 1145 } /* _init() */
1145 1146
1146 1147
1147 1148 /*
1148 1149 * Called when driver is unloaded.
1149 1150 */
1150 1151 int
1151 1152 _fini(void)
1152 1153 {
1153 1154 int ret;
1154 1155
1155 1156 if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1156 1157 return (ret);
1157 1158 }
1158 1159 #ifdef MODSYM_SUPPORT
1159 1160 /* Close SFS */
1160 1161 emlxs_fca_modclose();
1161 1162 #endif /* MODSYM_SUPPORT */
1162 1163
1163 1164 /*
1164 1165 * Destroy the soft state structure
1165 1166 */
1166 1167 (void) ddi_soft_state_fini(&emlxs_soft_state);
1167 1168
1168 1169 /* Destroy the global device lock */
1169 1170 mutex_destroy(&emlxs_device.lock);
1170 1171
1171 1172 #ifdef SAN_DIAG_SUPPORT
1172 1173 mutex_destroy(&sd_bucket_mutex);
1173 1174 #endif /* SAN_DIAG_SUPPORT */
1174 1175
1175 1176 return (ret);
1176 1177
1177 1178 } /* _fini() */
1178 1179
1179 1180
1180 1181
1181 1182 int
1182 1183 _info(struct modinfo *modinfop)
1183 1184 {
1184 1185
1185 1186 return (mod_info(&emlxs_modlinkage, modinfop));
1186 1187
1187 1188 } /* _info() */
1188 1189
1189 1190
1190 1191 /*
1191 1192 * Attach an ddiinst of an emlx host adapter.
1192 1193 * Allocate data structures, initialize the adapter and we're ready to fly.
1193 1194 */
1194 1195 static int
1195 1196 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1196 1197 {
1197 1198 emlxs_hba_t *hba;
1198 1199 int ddiinst;
1199 1200 int emlxinst;
1200 1201 int rval;
1201 1202
1202 1203 switch (cmd) {
1203 1204 case DDI_ATTACH:
1204 1205 /* If successful this will set EMLXS_PM_IN_ATTACH */
1205 1206 rval = emlxs_hba_attach(dip);
1206 1207 break;
1207 1208
1208 1209 case DDI_PM_RESUME:
1209 1210 /* This will resume the driver */
1210 1211 rval = emlxs_pm_raise_power(dip);
1211 1212 break;
1212 1213
1213 1214 case DDI_RESUME:
1214 1215 /* This will resume the driver */
1215 1216 rval = emlxs_hba_resume(dip);
1216 1217 break;
1217 1218
1218 1219 default:
1219 1220 rval = DDI_FAILURE;
1220 1221 }
1221 1222
1222 1223 if (rval == DDI_SUCCESS) {
1223 1224 ddiinst = ddi_get_instance(dip);
1224 1225 emlxinst = emlxs_get_instance(ddiinst);
1225 1226 hba = emlxs_device.hba[emlxinst];
1226 1227
1227 1228 if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {
1228 1229
1229 1230 /* Enable driver dump feature */
1230 1231 mutex_enter(&EMLXS_PORT_LOCK);
1231 1232 hba->flag |= FC_DUMP_SAFE;
1232 1233 mutex_exit(&EMLXS_PORT_LOCK);
1233 1234 }
1234 1235 }
1235 1236
1236 1237 return (rval);
1237 1238
1238 1239 } /* emlxs_attach() */
1239 1240
1240 1241
1241 1242 /*
1242 1243 * Detach/prepare driver to unload (see detach(9E)).
1243 1244 */
1244 1245 static int
1245 1246 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1246 1247 {
1247 1248 emlxs_hba_t *hba;
1248 1249 emlxs_port_t *port;
1249 1250 int ddiinst;
1250 1251 int emlxinst;
1251 1252 int rval;
1252 1253
1253 1254 ddiinst = ddi_get_instance(dip);
1254 1255 emlxinst = emlxs_get_instance(ddiinst);
1255 1256 hba = emlxs_device.hba[emlxinst];
1256 1257
1257 1258 if (hba == NULL) {
1258 1259 cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);
1259 1260
1260 1261 return (DDI_FAILURE);
1261 1262 }
1262 1263
1263 1264 if (hba == (emlxs_hba_t *)-1) {
1264 1265 cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
1265 1266 DRIVER_NAME);
1266 1267
1267 1268 return (DDI_FAILURE);
1268 1269 }
1269 1270
1270 1271 port = &PPORT;
1271 1272 rval = DDI_SUCCESS;
1272 1273
1273 1274 /* Check driver dump */
1274 1275 mutex_enter(&EMLXS_PORT_LOCK);
1275 1276
1276 1277 if (hba->flag & FC_DUMP_ACTIVE) {
1277 1278 mutex_exit(&EMLXS_PORT_LOCK);
1278 1279
1279 1280 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1280 1281 "emlxs_detach: Driver busy. Driver dump active.");
1281 1282
1282 1283 return (DDI_FAILURE);
1283 1284 }
1284 1285
1285 1286 #ifdef SFCT_SUPPORT
1286 1287 if (port->tgt_mode && ((port->fct_flags & FCT_STATE_PORT_ONLINE) ||
1287 1288 (port->fct_flags & FCT_STATE_NOT_ACKED))) {
1288 1289 mutex_exit(&EMLXS_PORT_LOCK);
1289 1290
1290 1291 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1291 1292 "emlxs_detach: Driver busy. Target mode active.");
1292 1293
1293 1294 return (DDI_FAILURE);
1294 1295 }
1295 1296 #endif /* SFCT_SUPPORT */
1296 1297
1297 1298 if (port->ini_mode && (port->flag & EMLXS_PORT_BOUND)) {
1298 1299 mutex_exit(&EMLXS_PORT_LOCK);
1299 1300
1300 1301 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1301 1302 "emlxs_detach: Driver busy. Initiator mode active.");
1302 1303
1303 1304 return (DDI_FAILURE);
1304 1305 }
1305 1306
1306 1307 hba->flag &= ~FC_DUMP_SAFE;
1307 1308
1308 1309 mutex_exit(&EMLXS_PORT_LOCK);
1309 1310
1310 1311 switch (cmd) {
1311 1312 case DDI_DETACH:
1312 1313
1313 1314 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1314 1315 "DDI_DETACH");
1315 1316
1316 1317 rval = emlxs_hba_detach(dip);
1317 1318
1318 1319 if (rval != DDI_SUCCESS) {
1319 1320 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1320 1321 "Unable to detach.");
1321 1322 }
1322 1323 break;
1323 1324
1324 1325
1325 1326 case DDI_PM_SUSPEND:
1326 1327
1327 1328 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1328 1329 "DDI_PM_SUSPEND");
1329 1330
1330 1331 /* This will suspend the driver */
1331 1332 rval = emlxs_pm_lower_power(dip);
1332 1333
1333 1334 if (rval != DDI_SUCCESS) {
1334 1335 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1335 1336 "Unable to lower power.");
1336 1337 }
1337 1338
1338 1339 break;
1339 1340
1340 1341
1341 1342 case DDI_SUSPEND:
1342 1343
1343 1344 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1344 1345 "DDI_SUSPEND");
1345 1346
1346 1347 /* Suspend the driver */
1347 1348 rval = emlxs_hba_suspend(dip);
1348 1349
1349 1350 if (rval != DDI_SUCCESS) {
1350 1351 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1351 1352 "Unable to suspend driver.");
1352 1353 }
1353 1354 break;
1354 1355
1355 1356
1356 1357 default:
1357 1358 cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
1358 1359 DRIVER_NAME, cmd);
1359 1360 rval = DDI_FAILURE;
1360 1361 }
1361 1362
1362 1363 if (rval == DDI_FAILURE) {
1363 1364 /* Re-Enable driver dump feature */
1364 1365 mutex_enter(&EMLXS_PORT_LOCK);
1365 1366 hba->flag |= FC_DUMP_SAFE;
1366 1367 mutex_exit(&EMLXS_PORT_LOCK);
1367 1368 }
1368 1369
1369 1370 return (rval);
1370 1371
1371 1372 } /* emlxs_detach() */
1372 1373
1373 1374
1374 1375 /* EMLXS_PORT_LOCK must be held when calling this */
1375 1376 extern void
1376 1377 emlxs_port_init(emlxs_port_t *port)
1377 1378 {
1378 1379 emlxs_hba_t *hba = HBA;
1379 1380
1380 1381 /* Initialize the base node */
1381 1382 bzero((caddr_t)&port->node_base, sizeof (NODELIST));
1382 1383 port->node_base.nlp_Rpi = 0;
1383 1384 port->node_base.nlp_DID = 0xffffff;
1384 1385 port->node_base.nlp_list_next = NULL;
1385 1386 port->node_base.nlp_list_prev = NULL;
1386 1387 port->node_base.nlp_active = 1;
1387 1388 port->node_base.nlp_base = 1;
1388 1389 port->node_count = 0;
1389 1390
1390 1391 if (!(port->flag & EMLXS_PORT_ENABLE)) {
1391 1392 uint8_t dummy_wwn[8] =
1392 1393 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1393 1394
1394 1395 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
1395 1396 sizeof (NAME_TYPE));
1396 1397 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
1397 1398 sizeof (NAME_TYPE));
1398 1399 }
1399 1400
1400 1401 if (!(port->flag & EMLXS_PORT_CONFIG)) {
1401 1402 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256);
1402 1403 (void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256);
1403 1404 }
1404 1405
1405 1406 bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
1406 1407 sizeof (SERV_PARM));
1407 1408 bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
1408 1409 sizeof (NAME_TYPE));
1409 1410 bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
1410 1411 sizeof (NAME_TYPE));
1411 1412
1412 1413 return;
1413 1414
1414 1415 } /* emlxs_port_init() */
1415 1416
1416 1417
1417 1418 void
1418 1419 emlxs_disable_pcie_ce_err(emlxs_hba_t *hba)
1419 1420 {
1420 1421 uint16_t reg;
1421 1422
1422 1423 if (!hba->pci_cap_offset[PCI_CAP_ID_PCI_E]) {
1423 1424 return;
1424 1425 }
1425 1426
1426 1427 /* Turn off the Correctable Error Reporting */
1427 1428 /* (the Device Control Register, bit 0). */
1428 1429 reg = ddi_get16(hba->pci_acc_handle,
1429 1430 (uint16_t *)(hba->pci_addr +
1430 1431 hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1431 1432 PCIE_DEVCTL_OFFSET));
1432 1433
1433 1434 reg &= ~1;
1434 1435
1435 1436 (void) ddi_put16(hba->pci_acc_handle,
1436 1437 (uint16_t *)(hba->pci_addr +
1437 1438 hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1438 1439 PCIE_DEVCTL_OFFSET),
1439 1440 reg);
1440 1441
1441 1442 return;
1442 1443
1443 1444 } /* emlxs_disable_pcie_ce_err() */
1444 1445
1445 1446
1446 1447 /*
1447 1448 * emlxs_fca_bind_port
1448 1449 *
1449 1450 * Arguments:
1450 1451 *
1451 1452 * dip: the dev_info pointer for the ddiinst
1452 1453 * port_info: pointer to info handed back to the transport
1453 1454 * bind_info: pointer to info from the transport
1454 1455 *
1455 1456 * Return values: a port handle for this port, NULL for failure
1456 1457 *
1457 1458 */
1458 1459 static opaque_t
1459 1460 emlxs_fca_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
1460 1461 fc_fca_bind_info_t *bind_info)
1461 1462 {
1462 1463 emlxs_hba_t *hba;
1463 1464 emlxs_port_t *port;
1464 1465 emlxs_port_t *vport;
1465 1466 int ddiinst;
1466 1467 emlxs_vpd_t *vpd;
1467 1468 emlxs_config_t *cfg;
1468 1469 char *dptr;
1469 1470 char buffer[16];
1470 1471 uint32_t length;
1471 1472 uint32_t len;
1472 1473 char topology[32];
1473 1474 char linkspeed[32];
1474 1475
1475 1476 ddiinst = ddi_get_instance(dip);
1476 1477 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
1477 1478 port = &PPORT;
1478 1479
1479 1480 ddiinst = hba->ddiinst;
1480 1481 vpd = &VPD;
1481 1482 cfg = &CFG;
1482 1483
1483 1484 mutex_enter(&EMLXS_PORT_LOCK);
1484 1485
1485 1486 if (bind_info->port_num > 0) {
1486 1487 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1487 1488 if (!(hba->flag & FC_NPIV_ENABLED) ||
1488 1489 !(bind_info->port_npiv) ||
1489 1490 (bind_info->port_num > hba->vpi_max))
1490 1491 #elif (EMLXS_MODREV >= EMLXS_MODREV3)
1491 1492 if (!(hba->flag & FC_NPIV_ENABLED) ||
1492 1493 (bind_info->port_num > hba->vpi_high))
1493 1494 #endif
1494 1495 {
1495 1496 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1496 1497 "fca_bind_port: Port %d not supported.",
1497 1498 bind_info->port_num);
1498 1499
1499 1500 mutex_exit(&EMLXS_PORT_LOCK);
1500 1501
1501 1502 port_info->pi_error = FC_OUTOFBOUNDS;
1502 1503 return (NULL);
1503 1504 }
1504 1505 }
1505 1506
1506 1507 /* Get true port pointer */
1507 1508 port = &VPORT(bind_info->port_num);
1508 1509
1509 1510 if (port->tgt_mode) {
1510 1511 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1511 1512 "fca_bind_port: Port %d is in target mode.",
1512 1513 bind_info->port_num);
1513 1514
1514 1515 mutex_exit(&EMLXS_PORT_LOCK);
1515 1516
1516 1517 port_info->pi_error = FC_OUTOFBOUNDS;
1517 1518 return (NULL);
1518 1519 }
1519 1520
1520 1521 if (!port->ini_mode) {
1521 1522 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1522 1523 "fca_bind_port: Port %d is not in initiator mode.",
1523 1524 bind_info->port_num);
1524 1525
1525 1526 mutex_exit(&EMLXS_PORT_LOCK);
1526 1527
1527 1528 port_info->pi_error = FC_OUTOFBOUNDS;
1528 1529 return (NULL);
1529 1530 }
1530 1531
1531 1532 /* Make sure the port is not already bound to the transport */
1532 1533 if (port->flag & EMLXS_PORT_BOUND) {
1533 1534
1534 1535 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1535 1536 "fca_bind_port: Port %d already bound. flag=%x",
1536 1537 bind_info->port_num, port->flag);
1537 1538
1538 1539 mutex_exit(&EMLXS_PORT_LOCK);
1539 1540
1540 1541 port_info->pi_error = FC_ALREADY;
1541 1542 return (NULL);
1542 1543 }
1543 1544
1544 1545 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1545 1546 "fca_bind_port: Port %d: port_info=%p bind_info=%p",
1546 1547 bind_info->port_num, port_info, bind_info);
1547 1548
1548 1549 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1549 1550 if (bind_info->port_npiv) {
1550 1551 bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
1551 1552 sizeof (NAME_TYPE));
1552 1553 bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
1553 1554 sizeof (NAME_TYPE));
1554 1555 if (port->snn[0] == 0) {
1555 1556 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1556 1557 256);
1557 1558 }
1558 1559
1559 1560 if (port->spn[0] == 0) {
1560 1561 (void) sprintf((caddr_t)port->spn, "%s VPort-%d",
1561 1562 (caddr_t)hba->spn, port->vpi);
1562 1563 }
1563 1564 port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
1564 1565 }
1565 1566 #endif /* >= EMLXS_MODREV5 */
1566 1567
1567 1568 /*
1568 1569 * Restricted login should apply both physical and
1569 1570 * virtual ports.
1570 1571 */
1571 1572 if (cfg[CFG_VPORT_RESTRICTED].current) {
1572 1573 port->flag |= EMLXS_PORT_RESTRICTED;
1573 1574 }
1574 1575
1575 1576 /* Perform generic port initialization */
1576 1577 emlxs_port_init(port);
1577 1578
1578 1579 /* Perform SFS specific initialization */
1579 1580 port->ulp_handle = bind_info->port_handle;
1580 1581 port->ulp_statec_cb = bind_info->port_statec_cb;
1581 1582 port->ulp_unsol_cb = bind_info->port_unsol_cb;
1582 1583 port->ub_count = EMLXS_UB_TOKEN_OFFSET;
1583 1584 port->ub_pool = NULL;
1584 1585
1585 1586 /* Update the port info structure */
1586 1587
1587 1588 /* Set the topology and state */
1588 1589 if ((hba->state < FC_LINK_UP) ||
1589 1590 ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) ||
1590 1591 !(hba->flag & FC_NPIV_SUPPORTED)))) {
1591 1592 port_info->pi_port_state = FC_STATE_OFFLINE;
1592 1593 port_info->pi_topology = FC_TOP_UNKNOWN;
1593 1594 } else if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
1594 1595 (port->VPIobj.state == VPI_STATE_OFFLINE)) {
1595 1596 port_info->pi_port_state = FC_STATE_OFFLINE;
1596 1597 port_info->pi_topology = FC_TOP_UNKNOWN;
1597 1598 }
1598 1599 #ifdef MENLO_SUPPORT
1599 1600 else if (hba->flag & FC_MENLO_MODE) {
1600 1601 port_info->pi_port_state = FC_STATE_OFFLINE;
1601 1602 port_info->pi_topology = FC_TOP_UNKNOWN;
1602 1603 }
1603 1604 #endif /* MENLO_SUPPORT */
1604 1605 else {
1605 1606 /* Check for loop topology */
1606 1607 if (hba->topology == TOPOLOGY_LOOP) {
1607 1608 port_info->pi_port_state = FC_STATE_LOOP;
1608 1609 (void) strcpy(topology, ", loop");
1609 1610
1610 1611 if (hba->flag & FC_FABRIC_ATTACHED) {
1611 1612 port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
1612 1613 } else {
1613 1614 port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
1614 1615 }
1615 1616 } else {
1616 1617 port_info->pi_topology = FC_TOP_FABRIC;
1617 1618 port_info->pi_port_state = FC_STATE_ONLINE;
1618 1619 (void) strcpy(topology, ", fabric");
1619 1620 }
1620 1621
1621 1622 /* Set the link speed */
1622 1623 switch (hba->linkspeed) {
1623 1624 case 0:
1624 1625 (void) strcpy(linkspeed, "Gb");
1625 1626 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1626 1627 break;
1627 1628
1628 1629 case LA_1GHZ_LINK:
1629 1630 (void) strcpy(linkspeed, "1Gb");
1630 1631 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1631 1632 break;
1632 1633 case LA_2GHZ_LINK:
1633 1634 (void) strcpy(linkspeed, "2Gb");
1634 1635 port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
1635 1636 break;
1636 1637 case LA_4GHZ_LINK:
1637 1638 (void) strcpy(linkspeed, "4Gb");
1638 1639 port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
1639 1640 break;
1640 1641 case LA_8GHZ_LINK:
1641 1642 (void) strcpy(linkspeed, "8Gb");
1642 1643 port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
1643 1644 break;
1644 1645 case LA_10GHZ_LINK:
1645 1646 (void) strcpy(linkspeed, "10Gb");
1646 1647 port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
1647 1648 break;
1648 1649 default:
1649 1650 (void) sprintf(linkspeed, "unknown(0x%x)",
1650 1651 hba->linkspeed);
1651 1652 break;
1652 1653 }
1653 1654
1654 1655 		/* Adjust the port context for link-up messages */
1655 1656 vport = port;
1656 1657 port = &PPORT;
1657 1658 if (vport->vpi == 0) {
1658 1659 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s",
1659 1660 linkspeed, topology);
1660 1661 } else if (!(hba->flag & FC_NPIV_LINKUP)) {
1661 1662 hba->flag |= FC_NPIV_LINKUP;
1662 1663 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg,
1663 1664 "%s%s", linkspeed, topology);
1664 1665 }
1665 1666 port = vport;
1666 1667
1667 1668 }
1668 1669
1669 1670 /* PCIE Correctable Error Reporting workaround */
1670 1671 if (((hba->model_info.chip == EMLXS_BE2_CHIP) ||
1671 1672 (hba->model_info.chip == EMLXS_BE3_CHIP)) &&
1672 1673 (bind_info->port_num == 0)) {
1673 1674 emlxs_disable_pcie_ce_err(hba);
1674 1675 }
1675 1676
1676 1677 /* Save initial state */
1677 1678 port->ulp_statec = port_info->pi_port_state;
1678 1679
1679 1680 /*
1680 1681 * The transport needs a copy of the common service parameters
1681 1682 * for this port. The transport can get any updates through
1682 1683 * the getcap entry point.
1683 1684 */
1684 1685 bcopy((void *) &port->sparam,
1685 1686 (void *) &port_info->pi_login_params.common_service,
1686 1687 sizeof (SERV_PARM));
1687 1688
1688 1689 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
1689 1690 /* Swap the service parameters for ULP */
1690 1691 emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params.
1691 1692 common_service);
1692 1693 #endif /* EMLXS_MODREV2X */
1693 1694
1694 1695 port_info->pi_login_params.common_service.btob_credit = 0xffff;
1695 1696
1696 1697 bcopy((void *) &port->wwnn,
1697 1698 (void *) &port_info->pi_login_params.node_ww_name,
1698 1699 sizeof (NAME_TYPE));
1699 1700
1700 1701 bcopy((void *) &port->wwpn,
1701 1702 (void *) &port_info->pi_login_params.nport_ww_name,
1702 1703 sizeof (NAME_TYPE));
1703 1704
1704 1705 /*
1705 1706 * We need to turn off CLASS2 support.
1706 1707 	 * Otherwise, the FC transport will use CLASS2 as the default
1707 1708 	 * class and never try CLASS3.
1708 1709 */
1709 1710 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1710 1711 #if (EMLXS_MODREVX >= EMLXS_MODREV3X)
1711 1712 if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
1712 1713 port_info->pi_login_params.class_1.class_opt &= ~0x0080;
1713 1714 }
1714 1715
1715 1716 if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
1716 1717 port_info->pi_login_params.class_2.class_opt &= ~0x0080;
1717 1718 }
1718 1719 #else /* EMLXS_SPARC or EMLXS_MODREV2X */
1719 1720 if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
1720 1721 port_info->pi_login_params.class_1.class_opt &= ~0x8000;
1721 1722 }
1722 1723
1723 1724 if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
1724 1725 port_info->pi_login_params.class_2.class_opt &= ~0x8000;
1725 1726 }
1726 1727 #endif /* >= EMLXS_MODREV3X */
1727 1728 #endif /* >= EMLXS_MODREV3 */
1728 1729
1729 1730
1730 1731 #if (EMLXS_MODREV <= EMLXS_MODREV2)
1731 1732 if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
1732 1733 port_info->pi_login_params.class_1.data[0] &= ~0x80;
1733 1734 }
1734 1735
1735 1736 if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
1736 1737 port_info->pi_login_params.class_2.data[0] &= ~0x80;
1737 1738 }
1738 1739 #endif /* <= EMLXS_MODREV2 */
1739 1740
1740 1741 /* Additional parameters */
1741 1742 port_info->pi_s_id.port_id = port->did;
1742 1743 port_info->pi_s_id.priv_lilp_posit = 0;
1743 1744 port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;
1744 1745
1745 1746 /* Initialize the RNID parameters */
1746 1747 bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));
1747 1748
1748 1749 (void) sprintf((char *)port_info->pi_rnid_params.params.global_id,
1749 1750 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType,
1750 1751 hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
1751 1752 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
1752 1753 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1753 1754
1754 1755 port_info->pi_rnid_params.params.unit_type = RNID_HBA;
1755 1756 port_info->pi_rnid_params.params.port_id = port->did;
1756 1757 port_info->pi_rnid_params.params.ip_version = RNID_IPV4;
1757 1758
1758 1759 /* Initialize the port attributes */
1759 1760 bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));
1760 1761
1761 1762 (void) strcpy(port_info->pi_attrs.manufacturer, "Emulex");
1762 1763
1763 1764 port_info->pi_rnid_params.status = FC_SUCCESS;
1764 1765
1765 1766 (void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num);
1766 1767
1767 1768 (void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)",
1768 1769 vpd->fw_version, vpd->fw_label);
1769 1770
1770 1771 #ifdef EMLXS_I386
1771 1772 (void) sprintf(port_info->pi_attrs.option_rom_version,
1772 1773 "Boot:%s", vpd->boot_version);
1773 1774 #else /* EMLXS_SPARC */
1774 1775 (void) sprintf(port_info->pi_attrs.option_rom_version,
1775 1776 "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version);
1776 1777 #endif /* EMLXS_I386 */
1777 1778
1778 1779
1779 1780 (void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)",
1780 1781 emlxs_version, emlxs_revision);
1781 1782
1782 1783 (void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME);
1783 1784
1784 1785 port_info->pi_attrs.vendor_specific_id =
1785 1786 ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX);
1786 1787
1787 1788 port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3);
1788 1789
1789 1790 port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;
1790 1791
1791 1792 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1792 1793
1793 1794 port_info->pi_rnid_params.params.num_attached = 0;
1794 1795
1795 1796 /*
1796 1797 	 * Copy the serial number string (rightmost 16 chars) into the
1797 1798 	 * right-justified local buffer
1798 1799 */
1799 1800 bzero(buffer, sizeof (buffer));
1800 1801 length = strlen(vpd->serial_num);
1801 1802 len = (length > 16) ? 16 : length;
1802 1803 bcopy(&vpd->serial_num[(length - len)],
1803 1804 &buffer[(sizeof (buffer) - len)], len);
1804 1805
1805 1806 port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index;
1806 1807
1807 1808 #endif /* >= EMLXS_MODREV5 */
1808 1809
1809 1810 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))
1810 1811
1811 1812 port_info->pi_rnid_params.params.num_attached = 0;
1812 1813
1813 1814 if (hba->flag & FC_NPIV_ENABLED) {
1814 1815 uint8_t byte;
1815 1816 uint8_t *wwpn;
1816 1817 uint32_t i;
1817 1818 uint32_t j;
1818 1819
1819 1820 /* Copy the WWPN as a string into the local buffer */
1820 1821 wwpn = (uint8_t *)&hba->wwpn;
1821 1822 for (i = 0; i < 16; i++) {
1822 1823 byte = *wwpn++;
1823 1824 j = ((byte & 0xf0) >> 4);
1824 1825 if (j <= 9) {
1825 1826 buffer[i] =
1826 1827 (char)((uint8_t)'0' + (uint8_t)j);
1827 1828 } else {
1828 1829 buffer[i] =
1829 1830 (char)((uint8_t)'A' + (uint8_t)(j -
1830 1831 10));
1831 1832 }
1832 1833
1833 1834 i++;
1834 1835 j = (byte & 0xf);
1835 1836 if (j <= 9) {
1836 1837 buffer[i] =
1837 1838 (char)((uint8_t)'0' + (uint8_t)j);
1838 1839 } else {
1839 1840 buffer[i] =
1840 1841 (char)((uint8_t)'A' + (uint8_t)(j -
1841 1842 10));
1842 1843 }
1843 1844 }
1844 1845
1845 1846 port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
1846 1847 } else {
1847 1848 		/* Copy the serial number string (rightmost 16 chars) */
1848 1849 		/* into the right-justified local buffer */
1849 1850 bzero(buffer, sizeof (buffer));
1850 1851 length = strlen(vpd->serial_num);
1851 1852 len = (length > 16) ? 16 : length;
1852 1853 bcopy(&vpd->serial_num[(length - len)],
1853 1854 &buffer[(sizeof (buffer) - len)], len);
1854 1855
1855 1856 port_info->pi_attrs.hba_fru_details.port_index =
1856 1857 vpd->port_index;
1857 1858 }
1858 1859
1859 1860 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */
1860 1861
1861 1862 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1862 1863
1863 1864 dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
1864 1865 dptr[0] = buffer[0];
1865 1866 dptr[1] = buffer[1];
1866 1867 dptr[2] = buffer[2];
1867 1868 dptr[3] = buffer[3];
1868 1869 dptr[4] = buffer[4];
1869 1870 dptr[5] = buffer[5];
1870 1871 dptr[6] = buffer[6];
1871 1872 dptr[7] = buffer[7];
1872 1873 port_info->pi_attrs.hba_fru_details.high =
1873 1874 LE_SWAP64(port_info->pi_attrs.hba_fru_details.high);
1874 1875
1875 1876 dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
1876 1877 dptr[0] = buffer[8];
1877 1878 dptr[1] = buffer[9];
1878 1879 dptr[2] = buffer[10];
1879 1880 dptr[3] = buffer[11];
1880 1881 dptr[4] = buffer[12];
1881 1882 dptr[5] = buffer[13];
1882 1883 dptr[6] = buffer[14];
1883 1884 dptr[7] = buffer[15];
1884 1885 port_info->pi_attrs.hba_fru_details.low =
1885 1886 LE_SWAP64(port_info->pi_attrs.hba_fru_details.low);
1886 1887
1887 1888 #endif /* >= EMLXS_MODREV3 */
1888 1889
1889 1890 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1890 1891 (void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
1891 1892 (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
1892 1893 (void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
1893 1894 (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
1894 1895 #endif /* >= EMLXS_MODREV4 */
1895 1896
1896 1897 (void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev);
1897 1898
1898 1899 /* Set the hba speed limit */
1899 1900 if (vpd->link_speed & LMT_10GB_CAPABLE) {
1900 1901 port_info->pi_attrs.supported_speed |=
1901 1902 FC_HBA_PORTSPEED_10GBIT;
1902 1903 }
1903 1904 if (vpd->link_speed & LMT_8GB_CAPABLE) {
1904 1905 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
1905 1906 }
1906 1907 if (vpd->link_speed & LMT_4GB_CAPABLE) {
1907 1908 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
1908 1909 }
1909 1910 if (vpd->link_speed & LMT_2GB_CAPABLE) {
1910 1911 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
1911 1912 }
1912 1913 if (vpd->link_speed & LMT_1GB_CAPABLE) {
1913 1914 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
1914 1915 }
1915 1916
1916 1917 /* Set the hba model info */
1917 1918 (void) strcpy(port_info->pi_attrs.model, hba->model_info.model);
1918 1919 (void) strcpy(port_info->pi_attrs.model_description,
1919 1920 hba->model_info.model_desc);
1920 1921
1921 1922
1922 1923 /* Log information */
1923 1924 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1924 1925 "Bind info: port_num = %d", bind_info->port_num);
1925 1926 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1926 1927 "Bind info: port_handle = %p", bind_info->port_handle);
1927 1928
1928 1929 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1929 1930 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1930 1931 "Bind info: port_npiv = %d", bind_info->port_npiv);
1931 1932 #endif /* >= EMLXS_MODREV5 */
1932 1933
1933 1934 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1934 1935 "Port info: pi_topology = %x", port_info->pi_topology);
1935 1936 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1936 1937 "Port info: pi_error = %x", port_info->pi_error);
1937 1938 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1938 1939 "Port info: pi_port_state = %x", port_info->pi_port_state);
1939 1940
1940 1941 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1941 1942 "Port info: port_id = %x", port_info->pi_s_id.port_id);
1942 1943 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1943 1944 "Port info: priv_lilp_posit = %x",
1944 1945 port_info->pi_s_id.priv_lilp_posit);
1945 1946
1946 1947 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1947 1948 "Port info: hard_addr = %x",
1948 1949 port_info->pi_hard_addr.hard_addr);
1949 1950
1950 1951 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1951 1952 "Port info: rnid.status = %x",
1952 1953 port_info->pi_rnid_params.status);
1953 1954 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1954 1955 "Port info: rnid.global_id = %16s",
1955 1956 port_info->pi_rnid_params.params.global_id);
1956 1957 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1957 1958 "Port info: rnid.unit_type = %x",
1958 1959 port_info->pi_rnid_params.params.unit_type);
1959 1960 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1960 1961 "Port info: rnid.port_id = %x",
1961 1962 port_info->pi_rnid_params.params.port_id);
1962 1963 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1963 1964 "Port info: rnid.num_attached = %x",
1964 1965 port_info->pi_rnid_params.params.num_attached);
1965 1966 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1966 1967 "Port info: rnid.ip_version = %x",
1967 1968 port_info->pi_rnid_params.params.ip_version);
1968 1969 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1969 1970 "Port info: rnid.udp_port = %x",
1970 1971 port_info->pi_rnid_params.params.udp_port);
1971 1972 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1972 1973 "Port info: rnid.ip_addr = %16s",
1973 1974 port_info->pi_rnid_params.params.ip_addr);
1974 1975 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1975 1976 "Port info: rnid.spec_id_resv = %x",
1976 1977 port_info->pi_rnid_params.params.specific_id_resv);
1977 1978 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1978 1979 "Port info: rnid.topo_flags = %x",
1979 1980 port_info->pi_rnid_params.params.topo_flags);
1980 1981
1981 1982 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1982 1983 "Port info: manufacturer = %s",
1983 1984 port_info->pi_attrs.manufacturer);
1984 1985 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1985 1986 "Port info: serial_num = %s",
1986 1987 port_info->pi_attrs.serial_number);
1987 1988 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1988 1989 "Port info: model = %s", port_info->pi_attrs.model);
1989 1990 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1990 1991 "Port info: model_description = %s",
1991 1992 port_info->pi_attrs.model_description);
1992 1993 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1993 1994 "Port info: hardware_version = %s",
1994 1995 port_info->pi_attrs.hardware_version);
1995 1996 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1996 1997 "Port info: driver_version = %s",
1997 1998 port_info->pi_attrs.driver_version);
1998 1999 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1999 2000 "Port info: option_rom_version = %s",
2000 2001 port_info->pi_attrs.option_rom_version);
2001 2002 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2002 2003 "Port info: firmware_version = %s",
2003 2004 port_info->pi_attrs.firmware_version);
2004 2005 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2005 2006 "Port info: driver_name = %s",
2006 2007 port_info->pi_attrs.driver_name);
2007 2008 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2008 2009 "Port info: vendor_specific_id = %x",
2009 2010 port_info->pi_attrs.vendor_specific_id);
2010 2011 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2011 2012 "Port info: supported_cos = %x",
2012 2013 port_info->pi_attrs.supported_cos);
2013 2014 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2014 2015 "Port info: supported_speed = %x",
2015 2016 port_info->pi_attrs.supported_speed);
2016 2017 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2017 2018 "Port info: max_frame_size = %x",
2018 2019 port_info->pi_attrs.max_frame_size);
2019 2020
2020 2021 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2021 2022 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2022 2023 "Port info: fru_port_index = %x",
2023 2024 port_info->pi_attrs.hba_fru_details.port_index);
2024 2025 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2025 2026 "Port info: fru_high = %llx",
2026 2027 port_info->pi_attrs.hba_fru_details.high);
2027 2028 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2028 2029 "Port info: fru_low = %llx",
2029 2030 port_info->pi_attrs.hba_fru_details.low);
2030 2031 #endif /* >= EMLXS_MODREV3 */
2031 2032
2032 2033 #if (EMLXS_MODREV >= EMLXS_MODREV4)
2033 2034 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2034 2035 "Port info: sym_node_name = %s",
2035 2036 port_info->pi_attrs.sym_node_name);
2036 2037 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2037 2038 "Port info: sym_port_name = %s",
2038 2039 port_info->pi_attrs.sym_port_name);
2039 2040 #endif /* >= EMLXS_MODREV4 */
2040 2041
2041 2042 /* Set the bound flag */
2042 2043 port->flag |= EMLXS_PORT_BOUND;
2043 2044 hba->num_of_ports++;
2044 2045
2045 2046 mutex_exit(&EMLXS_PORT_LOCK);
2046 2047
2047 2048 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2048 2049 (void) emlxs_vpi_port_bind_notify(port);
2049 2050 }
2050 2051
2051 2052 return ((opaque_t)port);
2052 2053
2053 2054 } /* emlxs_fca_bind_port() */
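
The NPIV branch above builds a 16-character hex string from the eight WWPN bytes by splitting each byte into two nibbles. A minimal user-space sketch of that conversion, with a made-up WWPN value (not driver code):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Example WWPN bytes; the driver reads these from hba->wwpn */
	uint8_t wwpn[8] = { 0x10, 0x00, 0x00, 0x00, 0xC9, 0x5F, 0xA3, 0x21 };
	char buffer[17];
	uint32_t i;

	for (i = 0; i < 8; i++) {
		uint8_t hi = (wwpn[i] & 0xf0) >> 4;
		uint8_t lo = (wwpn[i] & 0x0f);

		/* Same nibble-to-ASCII mapping as the loop above */
		buffer[2 * i] = (hi <= 9) ? ('0' + hi) : ('A' + (hi - 10));
		buffer[2 * i + 1] = (lo <= 9) ? ('0' + lo) : ('A' + (lo - 10));
	}
	buffer[16] = '\0';

	(void) printf("%s\n", buffer);	/* prints 10000000C95FA321 */
	return (0);
}
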
2054 2055
2055 2056
2056 2057 static void
2057 2058 emlxs_fca_unbind_port(opaque_t fca_port_handle)
2058 2059 {
2059 2060 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2060 2061 emlxs_hba_t *hba = HBA;
2061 2062
2062 2063 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2063 2064 "fca_unbind_port: port=%p", port);
2064 2065
2065 2066 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2066 2067 (void) emlxs_vpi_port_unbind_notify(port, 1);
2067 2068 }
2068 2069
2069 2070 /* Destroy & flush all port nodes, if they exist */
2070 2071 if (port->node_count) {
2071 2072 (void) emlxs_mb_unreg_node(port, 0, 0, 0, 0);
2072 2073 }
2073 2074
2074 2075 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2075 2076 if ((hba->sli_mode <= EMLXS_HBA_SLI3_MODE) &&
2076 2077 (hba->flag & FC_NPIV_ENABLED) &&
2077 2078 (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) {
2078 2079 (void) emlxs_mb_unreg_vpi(port);
2079 2080 }
2080 2081 #endif
2081 2082
2082 2083 mutex_enter(&EMLXS_PORT_LOCK);
2083 2084
2084 2085 if (!(port->flag & EMLXS_PORT_BOUND)) {
2085 2086 mutex_exit(&EMLXS_PORT_LOCK);
2086 2087 return;
2087 2088 }
2088 2089
2089 2090 port->flag &= ~EMLXS_PORT_BOUND;
2090 2091 hba->num_of_ports--;
2091 2092
2092 2093 port->ulp_handle = 0;
2093 2094 port->ulp_statec = FC_STATE_OFFLINE;
2094 2095 port->ulp_statec_cb = NULL;
2095 2096 port->ulp_unsol_cb = NULL;
2096 2097
2097 2098 mutex_exit(&EMLXS_PORT_LOCK);
2098 2099
2099 2100 return;
2100 2101
2101 2102 } /* emlxs_fca_unbind_port() */
2102 2103
2103 2104
2104 2105 /*ARGSUSED*/
2105 2106 extern int
2106 2107 emlxs_fca_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2107 2108 {
2108 2109 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2109 2110 emlxs_hba_t *hba = HBA;
2110 2111 emlxs_buf_t *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2111 2112
2112 2113 if (!sbp) {
2113 2114 return (FC_FAILURE);
2114 2115 }
2115 2116 bzero((void *)sbp, sizeof (emlxs_buf_t));
2116 2117
2117 2118 mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, DDI_INTR_PRI(hba->intr_arg));
2118 2119 sbp->pkt_flags =
2119 2120 PACKET_VALID | PACKET_ULP_OWNED;
2120 2121 sbp->port = port;
2121 2122 sbp->pkt = pkt;
2122 2123 sbp->iocbq.sbp = sbp;
2123 2124
2124 2125 return (FC_SUCCESS);
2125 2126
2126 2127 } /* emlxs_fca_pkt_init() */
2127 2128
2128 2129
2129 2130
2130 2131 static void
2131 2132 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2132 2133 {
2133 2134 emlxs_hba_t *hba = HBA;
2134 2135 emlxs_config_t *cfg = &CFG;
2135 2136 fc_packet_t *pkt = PRIV2PKT(sbp);
2136 2137 uint32_t *iptr;
2137 2138
2138 2139 mutex_enter(&sbp->mtx);
2139 2140
2140 2141 /* Reinitialize */
2141 2142 sbp->pkt = pkt;
2142 2143 sbp->port = port;
2143 2144 sbp->bmp = NULL;
2144 2145 sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2145 2146 sbp->iotag = 0;
2146 2147 sbp->ticks = 0;
2147 2148 sbp->abort_attempts = 0;
2148 2149 sbp->fpkt = NULL;
2149 2150 sbp->flush_count = 0;
2150 2151 sbp->next = NULL;
2151 2152
2152 2153 if (!port->tgt_mode) {
2153 2154 sbp->node = NULL;
2154 2155 sbp->did = 0;
2155 2156 sbp->lun = EMLXS_LUN_NONE;
2156 2157 sbp->class = 0;
2157 2158 sbp->class = 0;
2158 2159 sbp->channel = NULL;
2159 2160 }
2160 2161
2161 2162 bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
2162 2163 sbp->iocbq.sbp = sbp;
2163 2164
2164 2165 if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2165 2166 ddi_in_panic()) {
2166 2167 sbp->pkt_flags |= PACKET_POLLED;
2167 2168 }
2168 2169
2169 2170 /* Prepare the fc packet */
2170 2171 pkt->pkt_state = FC_PKT_SUCCESS;
2171 2172 pkt->pkt_reason = 0;
2172 2173 pkt->pkt_action = 0;
2173 2174 pkt->pkt_expln = 0;
2174 2175 pkt->pkt_data_resid = 0;
2175 2176 pkt->pkt_resp_resid = 0;
2176 2177
2177 2178 	/* Make sure all pkts have a proper timeout */
2178 2179 if (!cfg[CFG_TIMEOUT_ENABLE].current) {
2179 2180 /* This disables all IOCB on chip timeouts */
2180 2181 pkt->pkt_timeout = 0x80000000;
2181 2182 } else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2182 2183 pkt->pkt_timeout = 60;
2183 2184 }
2184 2185
2185 2186 /* Clear the response buffer */
2186 2187 if (pkt->pkt_rsplen) {
2187 2188 /* Check for FCP commands */
2188 2189 if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) ||
2189 2190 (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
2190 2191 iptr = (uint32_t *)pkt->pkt_resp;
2191 2192 iptr[2] = 0;
2192 2193 iptr[3] = 0;
2193 2194 } else {
2194 2195 bzero(pkt->pkt_resp, pkt->pkt_rsplen);
2195 2196 }
2196 2197 }
2197 2198
2198 2199 mutex_exit(&sbp->mtx);
2199 2200
2200 2201 return;
2201 2202
2202 2203 } /* emlxs_initialize_pkt() */
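
The timeout handling above follows a simple policy: with the timeout feature disabled every I/O gets the 0x80000000 "never time out" sentinel, and a zero or all-ones request falls back to 60 seconds. A standalone sketch of that policy (the helper name is illustrative, not part of the driver):

#include <assert.h>
#include <stdint.h>

static uint32_t
normalize_pkt_timeout(uint32_t timeout_enable, uint32_t pkt_timeout)
{
	if (!timeout_enable) {
		/* Disables all IOCB on-chip timeouts */
		return (0x80000000);
	}
	if (pkt_timeout == 0 || pkt_timeout == 0xffffffff) {
		/* Unspecified or "infinite" requests get 60 seconds */
		return (60);
	}
	return (pkt_timeout);
}

int
main(void)
{
	assert(normalize_pkt_timeout(0, 30) == 0x80000000);
	assert(normalize_pkt_timeout(1, 0) == 60);
	assert(normalize_pkt_timeout(1, 0xffffffff) == 60);
	assert(normalize_pkt_timeout(1, 10) == 10);
	return (0);
}
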
2203 2204
2204 2205
2205 2206
2206 2207 /*
2207 2208 * We may not need this routine
2208 2209 */
2209 2210 /*ARGSUSED*/
2210 2211 extern int
2211 2212 emlxs_fca_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2212 2213 {
2213 2214 emlxs_buf_t *sbp = PKT2PRIV(pkt);
2214 2215
2215 2216 if (!sbp) {
2216 2217 return (FC_FAILURE);
2217 2218 }
2218 2219
2219 2220 if (!(sbp->pkt_flags & PACKET_VALID)) {
2220 2221 return (FC_FAILURE);
2221 2222 }
2222 2223 sbp->pkt_flags &= ~PACKET_VALID;
2223 2224 mutex_destroy(&sbp->mtx);
2224 2225
2225 2226 return (FC_SUCCESS);
2226 2227
2227 2228 } /* emlxs_fca_pkt_uninit() */
2228 2229
2229 2230
2230 2231 static int
2231 2232 emlxs_fca_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2232 2233 {
2233 2234 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2234 2235 emlxs_hba_t *hba = HBA;
2235 2236 int32_t rval;
2236 2237
2237 2238 if (!(port->flag & EMLXS_PORT_BOUND)) {
2238 2239 return (FC_CAP_ERROR);
2239 2240 }
2240 2241
2241 2242 if (strcmp(cap, FC_NODE_WWN) == 0) {
2242 2243 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2243 2244 "fca_get_cap: FC_NODE_WWN");
2244 2245
2245 2246 bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE));
2246 2247 rval = FC_CAP_FOUND;
2247 2248
2248 2249 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2249 2250 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2250 2251 "fca_get_cap: FC_LOGIN_PARAMS");
2251 2252
2252 2253 /*
2253 2254 * We need to turn off CLASS2 support.
2254 2255 		 * Otherwise, the FC transport will use CLASS2 as the
2255 2256 		 * default class and never try CLASS3.
2256 2257 */
2257 2258 hba->sparam.cls2.classValid = 0;
2258 2259
2259 2260 bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));
2260 2261
2261 2262 rval = FC_CAP_FOUND;
2262 2263
2263 2264 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2264 2265 int32_t *num_bufs;
2265 2266 emlxs_config_t *cfg = &CFG;
2266 2267
2267 2268 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2268 2269 "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
2269 2270 cfg[CFG_UB_BUFS].current);
2270 2271
2271 2272 num_bufs = (int32_t *)ptr;
2272 2273
2273 2274 /* We multiply by MAX_VPORTS because ULP uses a */
2274 2275 /* formula to calculate ub bufs from this */
2275 2276 *num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);
2276 2277
2277 2278 rval = FC_CAP_FOUND;
2278 2279
2279 2280 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2280 2281 int32_t *size;
2281 2282
2282 2283 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2283 2284 "fca_get_cap: FC_CAP_PAYLOAD_SIZE");
2284 2285
2285 2286 size = (int32_t *)ptr;
2286 2287 *size = -1;
2287 2288 rval = FC_CAP_FOUND;
2288 2289
2289 2290 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2290 2291 fc_reset_action_t *action;
2291 2292
2292 2293 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2293 2294 "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");
2294 2295
2295 2296 action = (fc_reset_action_t *)ptr;
2296 2297 *action = FC_RESET_RETURN_ALL;
2297 2298 rval = FC_CAP_FOUND;
2298 2299
2299 2300 } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2300 2301 fc_dma_behavior_t *behavior;
2301 2302
2302 2303 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2303 2304 "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");
2304 2305
2305 2306 behavior = (fc_dma_behavior_t *)ptr;
2306 2307 *behavior = FC_ALLOW_STREAMING;
2307 2308 rval = FC_CAP_FOUND;
2308 2309
2309 2310 } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2310 2311 fc_fcp_dma_t *fcp_dma;
2311 2312
2312 2313 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2313 2314 "fca_get_cap: FC_CAP_FCP_DMA");
2314 2315
2315 2316 fcp_dma = (fc_fcp_dma_t *)ptr;
2316 2317 *fcp_dma = FC_DVMA_SPACE;
2317 2318 rval = FC_CAP_FOUND;
2318 2319
2319 2320 } else {
2320 2321 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2321 2322 "fca_get_cap: Unknown capability. [%s]", cap);
2322 2323
2323 2324 rval = FC_CAP_ERROR;
2324 2325
2325 2326 }
2326 2327
2327 2328 return (rval);
2328 2329
2329 2330 } /* emlxs_fca_get_cap() */
2330 2331
2331 2332
2332 2333
2333 2334 static int
2334 2335 emlxs_fca_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2335 2336 {
2336 2337 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2337 2338
2338 2339 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2339 2340 "fca_set_cap: cap=[%s] arg=%p", cap, ptr);
2340 2341
2341 2342 return (FC_CAP_ERROR);
2342 2343
2343 2344 } /* emlxs_fca_set_cap() */
2344 2345
2345 2346
2346 2347 static opaque_t
2347 2348 emlxs_fca_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
2348 2349 {
2349 2350 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2350 2351
2351 2352 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2352 2353 "fca_get_device: did=%x", d_id.port_id);
2353 2354
2354 2355 return (NULL);
2355 2356
2356 2357 } /* emlxs_fca_get_device() */
2357 2358
2358 2359
2359 2360 static int32_t
2360 2361 emlxs_fca_notify(opaque_t fca_port_handle, uint32_t cmd)
2361 2362 {
2362 2363 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2363 2364
2364 2365 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
2365 2366 cmd);
2366 2367
2367 2368 return (FC_SUCCESS);
2368 2369
2369 2370 } /* emlxs_fca_notify */
2370 2371
2371 2372
2372 2373
2373 2374 static int
2374 2375 emlxs_fca_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2375 2376 {
2376 2377 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2377 2378 emlxs_hba_t *hba = HBA;
2378 2379 uint32_t lilp_length;
2379 2380
2380 2381 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2381 2382 "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2382 2383 port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2383 2384 port->alpa_map[3], port->alpa_map[4]);
2384 2385
2385 2386 if (!(port->flag & EMLXS_PORT_BOUND)) {
2386 2387 return (FC_NOMAP);
2387 2388 }
2388 2389
2389 2390 if (hba->topology != TOPOLOGY_LOOP) {
2390 2391 return (FC_NOMAP);
2391 2392 }
2392 2393
2393 2394 /* Check if alpa map is available */
2394 2395 if (port->alpa_map[0] != 0) {
2395 2396 mapbuf->lilp_magic = MAGIC_LILP;
2396 2397 } else { /* No LILP map available */
2397 2398
2398 2399 /* Set lilp_magic to MAGIC_LISA and this will */
2399 2400 /* trigger an ALPA scan in ULP */
2400 2401 mapbuf->lilp_magic = MAGIC_LISA;
2401 2402 }
2402 2403
2403 2404 mapbuf->lilp_myalpa = port->did;
2404 2405
2405 2406 /* The first byte of the alpa_map is the lilp map length */
2406 2407 /* Add one to include the lilp length byte itself */
2407 2408 lilp_length = (uint32_t)port->alpa_map[0] + 1;
2408 2409
2409 2410 /* Make sure the max transfer is 128 bytes */
2410 2411 if (lilp_length > 128) {
2411 2412 lilp_length = 128;
2412 2413 }
2413 2414
2414 2415 /* We start copying from the lilp_length field */
2415 2416 /* in order to get a word aligned address */
2416 2417 bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
2417 2418 lilp_length);
2418 2419
2419 2420 return (FC_SUCCESS);
2420 2421
2421 2422 } /* emlxs_fca_get_map() */
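
The copy above treats the first byte of the ALPA map as the LILP map length, adds one for the length byte itself, and caps the transfer at 128 bytes. A small user-space sketch of the same calculation, with made-up map contents:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int
main(void)
{
	/* Example map: first byte is the map length, then 3 ALPAs */
	uint8_t alpa_map[128] = { 3, 0x01, 0x02, 0x04 };
	uint8_t lilp_buf[128];
	uint32_t lilp_length;

	/* Add one to include the length byte itself */
	lilp_length = (uint32_t)alpa_map[0] + 1;

	/* Cap the transfer at 128 bytes */
	if (lilp_length > 128)
		lilp_length = 128;

	(void) memcpy(lilp_buf, alpa_map, lilp_length);
	(void) printf("copied %u bytes\n", lilp_length);
	return (0);
}
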
2422 2423
2423 2424
2424 2425
2425 2426 extern int
2426 2427 emlxs_fca_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
2427 2428 {
2428 2429 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2429 2430 emlxs_hba_t *hba = HBA;
2430 2431 emlxs_buf_t *sbp;
2431 2432 uint32_t rval;
2432 2433 uint32_t pkt_flags;
2433 2434
2434 2435 /* Make sure adapter is online */
2435 2436 if (!(hba->flag & FC_ONLINE_MODE)) {
2436 2437 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2437 2438 "Adapter offline.");
2438 2439
2439 2440 return (FC_OFFLINE);
2440 2441 }
2441 2442
2442 2443 /* Validate packet */
2443 2444 sbp = PKT2PRIV(pkt);
2444 2445
2445 2446 /* Make sure ULP was told that the port was online */
2446 2447 if ((port->ulp_statec == FC_STATE_OFFLINE) &&
2447 2448 !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2448 2449 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2449 2450 "Port offline.");
2450 2451
2451 2452 return (FC_OFFLINE);
2452 2453 }
2453 2454
2454 2455 if (sbp->port != port) {
2455 2456 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2456 2457 "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
2457 2458 sbp->port, sbp->pkt_flags);
2458 2459 return (FC_BADPACKET);
2459 2460 }
2460 2461
2461 2462 if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) {
2462 2463 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2463 2464 "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
2464 2465 sbp->port, sbp->pkt_flags);
2465 2466 return (FC_BADPACKET);
2466 2467 }
2467 2468 #ifdef SFCT_SUPPORT
2468 2469 if (port->tgt_mode && !sbp->fct_cmd &&
2469 2470 !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2470 2471 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2471 2472 "Packet blocked. Target mode.");
2472 2473 return (FC_TRANSPORT_ERROR);
2473 2474 }
2474 2475 #endif /* SFCT_SUPPORT */
2475 2476
2476 2477 #ifdef IDLE_TIMER
2477 2478 emlxs_pm_busy_component(hba);
2478 2479 #endif /* IDLE_TIMER */
2479 2480
2480 2481 /* Prepare the packet for transport */
2481 2482 emlxs_initialize_pkt(port, sbp);
2482 2483
2483 2484 /* Save a copy of the pkt flags. */
2484 2485 /* We will check the polling flag later */
2485 2486 pkt_flags = sbp->pkt_flags;
2486 2487
2487 2488 /* Send the packet */
2488 2489 switch (pkt->pkt_tran_type) {
2489 2490 case FC_PKT_FCP_READ:
2490 2491 case FC_PKT_FCP_WRITE:
2491 2492 rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
2492 2493 break;
2493 2494
2494 2495 case FC_PKT_IP_WRITE:
2495 2496 case FC_PKT_BROADCAST:
2496 2497 rval = emlxs_send_ip(port, sbp);
2497 2498 break;
2498 2499
2499 2500 case FC_PKT_EXCHANGE:
2500 2501 switch (pkt->pkt_cmd_fhdr.type) {
2501 2502 case FC_TYPE_SCSI_FCP:
2502 2503 rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
2503 2504 break;
2504 2505
2505 2506 case FC_TYPE_FC_SERVICES:
2506 2507 rval = emlxs_send_ct(port, sbp);
2507 2508 break;
2508 2509
2509 2510 #ifdef MENLO_SUPPORT
2510 2511 case EMLXS_MENLO_TYPE:
2511 2512 rval = emlxs_send_menlo(port, sbp);
2512 2513 break;
2513 2514 #endif /* MENLO_SUPPORT */
2514 2515
2515 2516 default:
2516 2517 rval = emlxs_send_els(port, sbp);
2517 2518 }
2518 2519 break;
2519 2520
2520 2521 case FC_PKT_OUTBOUND:
2521 2522 switch (pkt->pkt_cmd_fhdr.type) {
2522 2523 #ifdef SFCT_SUPPORT
2523 2524 case FC_TYPE_SCSI_FCP:
2524 2525 rval = emlxs_send_fct_status(port, sbp);
2525 2526 break;
2526 2527
2527 2528 case FC_TYPE_BASIC_LS:
2528 2529 rval = emlxs_send_fct_abort(port, sbp);
2529 2530 break;
2530 2531 #endif /* SFCT_SUPPORT */
2531 2532
2532 2533 case FC_TYPE_FC_SERVICES:
2533 2534 rval = emlxs_send_ct_rsp(port, sbp);
2534 2535 break;
2535 2536 #ifdef MENLO_SUPPORT
2536 2537 case EMLXS_MENLO_TYPE:
2537 2538 rval = emlxs_send_menlo(port, sbp);
2538 2539 break;
2539 2540 #endif /* MENLO_SUPPORT */
2540 2541
2541 2542 default:
2542 2543 rval = emlxs_send_els_rsp(port, sbp);
2543 2544 }
2544 2545 break;
2545 2546
2546 2547 default:
2547 2548 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2548 2549 "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
2549 2550 rval = FC_TRANSPORT_ERROR;
2550 2551 break;
2551 2552 }
2552 2553
2553 2554 /* Check if send was not successful */
2554 2555 if (rval != FC_SUCCESS) {
2555 2556 /* Return packet to ULP */
2556 2557 mutex_enter(&sbp->mtx);
2557 2558 sbp->pkt_flags |= PACKET_ULP_OWNED;
2558 2559 mutex_exit(&sbp->mtx);
2559 2560
2560 2561 return (rval);
2561 2562 }
2562 2563
2563 2564 /* Check if this packet should be polled for completion before */
2564 2565 /* returning. This check must be done with a saved copy of the */
2565 2566 /* pkt_flags because the packet itself could already be freed from */
2566 2567 /* memory if it was not polled. */
2567 2568 if (pkt_flags & PACKET_POLLED) {
2568 2569 emlxs_poll(port, sbp);
2569 2570 }
2570 2571
2571 2572 return (FC_SUCCESS);
2572 2573
2573 2574 } /* emlxs_fca_transport() */
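
A detail worth noting above is that the polling decision is made from a saved copy of pkt_flags: once the packet is handed to a send routine it may complete and be freed on another thread, so the original structure cannot be touched again. A simplified user-space illustration of that pattern (the structure and helpers are stand-ins, not driver code):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define	PACKET_POLLED	0x01

struct buf {
	uint32_t pkt_flags;
};

/* Stand-in for the send path; the completion side may free the buffer */
static void
send_and_maybe_free(struct buf *bp)
{
	free(bp);
}

int
main(void)
{
	struct buf *bp;
	uint32_t saved_flags;

	if ((bp = malloc(sizeof (*bp))) == NULL)
		return (1);
	bp->pkt_flags = PACKET_POLLED;

	saved_flags = bp->pkt_flags;	/* snapshot before the send */
	send_and_maybe_free(bp);	/* bp must not be used after this */

	if (saved_flags & PACKET_POLLED)
		(void) printf("would poll for completion\n");
	return (0);
}
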
2574 2575
2575 2576
2576 2577
2577 2578 static void
2578 2579 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
2579 2580 {
2580 2581 emlxs_hba_t *hba = HBA;
2581 2582 fc_packet_t *pkt = PRIV2PKT(sbp);
2582 2583 clock_t timeout;
2583 2584 clock_t time;
2584 2585 uint32_t att_bit;
2585 2586 CHANNEL *cp;
2586 2587 int in_panic = 0;
2587 2588
2588 2589 mutex_enter(&EMLXS_PORT_LOCK);
2589 2590 hba->io_poll_count++;
2590 2591 mutex_exit(&EMLXS_PORT_LOCK);
2591 2592
2592 2593 /* Check for panic situation */
2593 2594 cp = (CHANNEL *)sbp->channel;
2594 2595
2595 2596 if (ddi_in_panic()) {
2596 2597 in_panic = 1;
2597 2598 /*
2598 2599 * In panic situations there will be one thread with
2599 2600 		 * no interrupts (hard or soft) and no timers
2600 2601 */
2601 2602
2602 2603 /*
2603 2604 * We must manually poll everything in this thread
2604 2605 * to keep the driver going.
2605 2606 */
2606 2607 if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
2607 2608 switch (cp->channelno) {
2608 2609 case FC_FCP_RING:
2609 2610 att_bit = HA_R0ATT;
2610 2611 break;
2611 2612
2612 2613 case FC_IP_RING:
2613 2614 att_bit = HA_R1ATT;
2614 2615 break;
2615 2616
2616 2617 case FC_ELS_RING:
2617 2618 att_bit = HA_R2ATT;
2618 2619 break;
2619 2620
2620 2621 case FC_CT_RING:
2621 2622 att_bit = HA_R3ATT;
2622 2623 break;
2623 2624 }
2624 2625 }
2625 2626
2626 2627 /* Keep polling the chip until our IO is completed */
2627 2628 /* Driver's timer will not function during panics. */
2628 2629 /* Therefore, timer checks must be performed manually. */
2629 2630 (void) drv_getparm(LBOLT, &time);
2630 2631 timeout = time + drv_usectohz(1000000);
2631 2632 while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2632 2633 if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
2633 2634 EMLXS_SLI_POLL_INTR(hba, att_bit);
2634 2635 } else {
2635 2636 EMLXS_SLI_POLL_INTR(hba, 0);
2636 2637 }
2637 2638 (void) drv_getparm(LBOLT, &time);
2638 2639
2639 2640 /* Trigger timer checks periodically */
2640 2641 if (time >= timeout) {
2641 2642 emlxs_timer_checks(hba);
2642 2643 timeout = time + drv_usectohz(1000000);
2643 2644 }
2644 2645 }
2645 2646 } else {
2646 2647 /* Wait for IO completion */
2647 2648 /* The driver's timer will detect */
2648 2649 /* any timeout and abort the I/O. */
2649 2650 mutex_enter(&EMLXS_PKT_LOCK);
2650 2651 while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2651 2652 cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
2652 2653 }
2653 2654 mutex_exit(&EMLXS_PKT_LOCK);
2654 2655 }
2655 2656
2656 2657 /* Check for fcp reset pkt */
2657 2658 if (sbp->pkt_flags & PACKET_FCP_RESET) {
2658 2659 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2659 2660 /* Flush the IO's on the chipq */
2660 2661 (void) emlxs_chipq_node_flush(port,
2661 2662 &hba->chan[hba->channel_fcp],
2662 2663 sbp->node, sbp);
2663 2664 } else {
2664 2665 /* Flush the IO's on the chipq for this lun */
2665 2666 (void) emlxs_chipq_lun_flush(port,
2666 2667 sbp->node, sbp->lun, sbp);
2667 2668 }
2668 2669
2669 2670 if (sbp->flush_count == 0) {
2670 2671 emlxs_node_open(port, sbp->node, hba->channel_fcp);
2671 2672 goto done;
2672 2673 }
2673 2674
2674 2675 /* Set the timeout so the flush has time to complete */
2675 2676 timeout = emlxs_timeout(hba, 60);
2676 2677 (void) drv_getparm(LBOLT, &time);
2677 2678 while ((time < timeout) && sbp->flush_count > 0) {
2678 2679 delay(drv_usectohz(500000));
2679 2680 (void) drv_getparm(LBOLT, &time);
2680 2681 }
2681 2682
2682 2683 if (sbp->flush_count == 0) {
2683 2684 emlxs_node_open(port, sbp->node, hba->channel_fcp);
2684 2685 goto done;
2685 2686 }
2686 2687
2687 2688 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2688 2689 "sbp=%p flush_count=%d. Waiting...", sbp,
2689 2690 sbp->flush_count);
2690 2691
2691 2692 /* Let's try this one more time */
2692 2693
2693 2694 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2694 2695 /* Flush the IO's on the chipq */
2695 2696 (void) emlxs_chipq_node_flush(port,
2696 2697 &hba->chan[hba->channel_fcp],
2697 2698 sbp->node, sbp);
2698 2699 } else {
2699 2700 /* Flush the IO's on the chipq for this lun */
2700 2701 (void) emlxs_chipq_lun_flush(port,
2701 2702 sbp->node, sbp->lun, sbp);
2702 2703 }
2703 2704
2704 2705 /* Reset the timeout so the flush has time to complete */
2705 2706 timeout = emlxs_timeout(hba, 60);
2706 2707 (void) drv_getparm(LBOLT, &time);
2707 2708 while ((time < timeout) && sbp->flush_count > 0) {
2708 2709 delay(drv_usectohz(500000));
2709 2710 (void) drv_getparm(LBOLT, &time);
2710 2711 }
2711 2712
2712 2713 if (sbp->flush_count == 0) {
2713 2714 emlxs_node_open(port, sbp->node, hba->channel_fcp);
2714 2715 goto done;
2715 2716 }
2716 2717
2717 2718 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2718 2719 "sbp=%p flush_count=%d. Resetting link.", sbp,
2719 2720 sbp->flush_count);
2720 2721
2721 2722 /* Let's first try to reset the link */
2722 2723 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
2723 2724
2724 2725 if (sbp->flush_count == 0) {
2725 2726 goto done;
2726 2727 }
2727 2728
2728 2729 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2729 2730 "sbp=%p flush_count=%d. Resetting HBA.", sbp,
2730 2731 sbp->flush_count);
2731 2732
2732 2733 /* If that doesn't work, reset the adapter */
2733 2734 (void) emlxs_reset(port, FC_FCA_RESET);
2734 2735
2735 2736 if (sbp->flush_count != 0) {
2736 2737 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2737 2738 "sbp=%p flush_count=%d. Giving up.", sbp,
2738 2739 sbp->flush_count);
2739 2740 }
2740 2741
2741 2742 }
2742 2743 /* PACKET_FCP_RESET */
2743 2744 done:
2744 2745
2745 2746 /* Packet has been declared completed and is now ready to be returned */
2746 2747
2747 2748 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
2748 2749 emlxs_unswap_pkt(sbp);
2749 2750 #endif /* EMLXS_MODREV2X */
2750 2751
2751 2752 mutex_enter(&sbp->mtx);
2752 2753 sbp->pkt_flags |= PACKET_ULP_OWNED;
2753 2754 mutex_exit(&sbp->mtx);
2754 2755
2755 2756 mutex_enter(&EMLXS_PORT_LOCK);
2756 2757 hba->io_poll_count--;
2757 2758 mutex_exit(&EMLXS_PORT_LOCK);
2758 2759
2759 2760 #ifdef FMA_SUPPORT
2760 2761 if (!in_panic) {
2761 2762 emlxs_check_dma(hba, sbp);
2762 2763 }
2763 2764 #endif
2764 2765
2765 2766 /* Make ULP completion callback if required */
2766 2767 if (pkt->pkt_comp) {
2767 2768 cp->ulpCmplCmd++;
2768 2769 (*pkt->pkt_comp) (pkt);
2769 2770 }
2770 2771
2771 2772 #ifdef FMA_SUPPORT
2772 2773 if (hba->flag & FC_DMA_CHECK_ERROR) {
2773 2774 emlxs_thread_spawn(hba, emlxs_restart_thread,
2774 2775 NULL, NULL);
2775 2776 }
2776 2777 #endif
2777 2778
2778 2779 return;
2779 2780
2780 2781 } /* emlxs_poll() */
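
During a panic the loop above has to drive everything itself: it polls the chip and, roughly once per second of lbolt time, runs the driver's timer checks by hand because no timers fire in that context. A user-space analogue of that structure, with CLOCK_MONOTONIC standing in for the lbolt tick counter and stub functions in place of the chip poll and timer checks:

#include <stdio.h>
#include <time.h>

static int done;

/* Stand-in for EMLXS_SLI_POLL_INTR(); pretends the I/O finishes */
static void
poll_hw(void)
{
	static int calls;

	if (++calls > 1000000)
		done = 1;
}

/* Stand-in for emlxs_timer_checks() */
static void
timer_checks(void)
{
	(void) printf("timer checks\n");
}

int
main(void)
{
	struct timespec now;
	time_t deadline;

	(void) clock_gettime(CLOCK_MONOTONIC, &now);
	deadline = now.tv_sec + 1;	/* roughly one second out */

	while (!done) {
		poll_hw();
		(void) clock_gettime(CLOCK_MONOTONIC, &now);

		/* Trigger the timer checks periodically */
		if (now.tv_sec >= deadline) {
			timer_checks();
			deadline = now.tv_sec + 1;
		}
	}
	return (0);
}
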
2781 2782
2782 2783
2783 2784 static int
2784 2785 emlxs_fca_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
2785 2786 uint32_t *count, uint32_t type)
2786 2787 {
2787 2788 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2788 2789 emlxs_hba_t *hba = HBA;
2789 2790
2790 2791 char *err = NULL;
2791 2792 emlxs_unsol_buf_t *pool;
2792 2793 emlxs_unsol_buf_t *new_pool;
2793 2794 int32_t i;
2794 2795 int result;
2795 2796 uint32_t free_resv;
2796 2797 uint32_t free;
2797 2798 emlxs_config_t *cfg = &CFG;
2798 2799 fc_unsol_buf_t *ubp;
2799 2800 emlxs_ub_priv_t *ub_priv;
2800 2801 int rc;
2801 2802
2802 2803 if (port->tgt_mode) {
2803 2804 if (tokens && count) {
2804 2805 bzero(tokens, (sizeof (uint64_t) * (*count)));
2805 2806 }
2806 2807 return (FC_SUCCESS);
2807 2808 }
2808 2809
2809 2810 if (!(port->flag & EMLXS_PORT_BOUND)) {
2810 2811 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2811 2812 "fca_ub_alloc failed: Port not bound! size=%x count=%d "
2812 2813 "type=%x", size, *count, type);
2813 2814
2814 2815 return (FC_FAILURE);
2815 2816 }
2816 2817
2817 2818 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2818 2819 "fca_ub_alloc: size=%x count=%d type=%x", size, *count, type);
2819 2820
2820 2821 if (count && (*count > EMLXS_MAX_UBUFS)) {
2821 2822 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2822 2823 		    "fca_ub_alloc failed: Too many unsolicited buffers "
2823 2824 "requested. count=%x", *count);
2824 2825
2825 2826 return (FC_FAILURE);
2826 2827
2827 2828 }
2828 2829
2829 2830 if (tokens == NULL) {
2830 2831 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2831 2832 "fca_ub_alloc failed: Token array is NULL.");
2832 2833
2833 2834 return (FC_FAILURE);
2834 2835 }
2835 2836
2836 2837 /* Clear the token array */
2837 2838 bzero(tokens, (sizeof (uint64_t) * (*count)));
2838 2839
2839 2840 free_resv = 0;
2840 2841 free = *count;
2841 2842 switch (type) {
2842 2843 case FC_TYPE_BASIC_LS:
2843 2844 err = "BASIC_LS";
2844 2845 break;
2845 2846 case FC_TYPE_EXTENDED_LS:
2846 2847 err = "EXTENDED_LS";
2847 2848 free = *count / 2; /* Hold 50% for normal use */
2848 2849 free_resv = *count - free; /* Reserve 50% for RSCN use */
2849 2850 break;
2850 2851 case FC_TYPE_IS8802:
2851 2852 err = "IS8802";
2852 2853 break;
2853 2854 case FC_TYPE_IS8802_SNAP:
2854 2855 err = "IS8802_SNAP";
2855 2856
2856 2857 if (cfg[CFG_NETWORK_ON].current == 0) {
2857 2858 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2858 2859 "fca_ub_alloc failed: IP support is disabled.");
2859 2860
2860 2861 return (FC_FAILURE);
2861 2862 }
2862 2863 break;
2863 2864 case FC_TYPE_SCSI_FCP:
2864 2865 err = "SCSI_FCP";
2865 2866 break;
2866 2867 case FC_TYPE_SCSI_GPP:
2867 2868 err = "SCSI_GPP";
2868 2869 break;
2869 2870 case FC_TYPE_HIPP_FP:
2870 2871 err = "HIPP_FP";
2871 2872 break;
2872 2873 case FC_TYPE_IPI3_MASTER:
2873 2874 err = "IPI3_MASTER";
2874 2875 break;
2875 2876 case FC_TYPE_IPI3_SLAVE:
2876 2877 err = "IPI3_SLAVE";
2877 2878 break;
2878 2879 case FC_TYPE_IPI3_PEER:
2879 2880 err = "IPI3_PEER";
2880 2881 break;
2881 2882 case FC_TYPE_FC_SERVICES:
2882 2883 err = "FC_SERVICES";
2883 2884 break;
2884 2885 }
2885 2886
2886 2887 mutex_enter(&EMLXS_UB_LOCK);
2887 2888
2888 2889 /*
2889 2890 * Walk through the list of the unsolicited buffers
2890 2891 * for this ddiinst of emlx.
2891 2892 */
2892 2893
2893 2894 pool = port->ub_pool;
2894 2895
2895 2896 /*
2896 2897 	 * emlxs_fca_ub_alloc() can be called more than once with different
2897 2898 	 * sizes. We reject the call if a pool of the same size
2898 2899 	 * and FC-4 type already exists.
2899 2900 */
2900 2901 while (pool) {
2901 2902 if ((pool->pool_type == type) &&
2902 2903 (pool->pool_buf_size == size)) {
2903 2904 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2904 2905 "fca_ub_alloc failed: Unsolicited buffer pool "
2905 2906 "for %s of size 0x%x bytes already exists.",
2906 2907 err, size);
2907 2908
2908 2909 result = FC_FAILURE;
2909 2910 goto fail;
2910 2911 }
2911 2912
2912 2913 pool = pool->pool_next;
2913 2914 }
2914 2915
2915 2916 mutex_exit(&EMLXS_UB_LOCK);
2916 2917
2917 2918 new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
2918 2919 KM_SLEEP);
2919 2920
2920 2921 new_pool->pool_next = NULL;
2921 2922 new_pool->pool_type = type;
2922 2923 new_pool->pool_buf_size = size;
2923 2924 new_pool->pool_nentries = *count;
2924 2925 new_pool->pool_available = new_pool->pool_nentries;
2925 2926 new_pool->pool_free = free;
2926 2927 new_pool->pool_free_resv = free_resv;
2927 2928 new_pool->fc_ubufs =
2928 2929 kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);
2929 2930
2930 2931 new_pool->pool_first_token = port->ub_count;
2931 2932 new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
2932 2933
2933 2934 for (i = 0; i < new_pool->pool_nentries; i++) {
2934 2935 ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
2935 2936 ubp->ub_port_handle = port->ulp_handle;
2936 2937 ubp->ub_token = (uint64_t)((unsigned long)ubp);
2937 2938 ubp->ub_bufsize = size;
2938 2939 ubp->ub_class = FC_TRAN_CLASS3;
2939 2940 ubp->ub_port_private = NULL;
2940 2941 ubp->ub_fca_private =
2941 2942 (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
2942 2943 KM_SLEEP);
2943 2944
2944 2945 /*
2945 2946 * Initialize emlxs_ub_priv_t
2946 2947 */
2947 2948 ub_priv = ubp->ub_fca_private;
2948 2949 ub_priv->ubp = ubp;
2949 2950 ub_priv->port = port;
2950 2951 ub_priv->flags = EMLXS_UB_FREE;
2951 2952 ub_priv->available = 1;
2952 2953 ub_priv->pool = new_pool;
2953 2954 ub_priv->time = 0;
2954 2955 ub_priv->timeout = 0;
2955 2956 ub_priv->token = port->ub_count;
2956 2957 ub_priv->cmd = 0;
2957 2958
2958 2959 /* Allocate the actual buffer */
2959 2960 ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);
2960 2961
2961 2962
2962 2963 tokens[i] = (uint64_t)((unsigned long)ubp);
2963 2964 port->ub_count++;
2964 2965 }
2965 2966
2966 2967 mutex_enter(&EMLXS_UB_LOCK);
2967 2968
2968 2969 /* Add the pool to the top of the pool list */
2969 2970 new_pool->pool_prev = NULL;
2970 2971 new_pool->pool_next = port->ub_pool;
2971 2972
2972 2973 if (port->ub_pool) {
2973 2974 port->ub_pool->pool_prev = new_pool;
2974 2975 }
2975 2976 port->ub_pool = new_pool;
2976 2977
2977 2978 /* Set the post counts */
2978 2979 if (type == FC_TYPE_IS8802_SNAP) {
2979 2980 MAILBOXQ *mbox;
2980 2981
2981 2982 port->ub_post[hba->channel_ip] += new_pool->pool_nentries;
2982 2983
2983 2984 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
2984 2985 MEM_MBOX, 1))) {
2985 2986 emlxs_mb_config_farp(hba, mbox);
2986 2987 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
2987 2988 mbox, MBX_NOWAIT, 0);
2988 2989 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
2989 2990 emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
2990 2991 }
2991 2992 }
2992 2993 port->flag |= EMLXS_PORT_IP_UP;
2993 2994 } else if (type == FC_TYPE_EXTENDED_LS) {
2994 2995 port->ub_post[hba->channel_els] += new_pool->pool_nentries;
2995 2996 } else if (type == FC_TYPE_FC_SERVICES) {
2996 2997 port->ub_post[hba->channel_ct] += new_pool->pool_nentries;
2997 2998 }
2998 2999
2999 3000 mutex_exit(&EMLXS_UB_LOCK);
3000 3001
3001 3002 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
3002 3003 "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
3003 3004 *count, err, size);
3004 3005
3005 3006 return (FC_SUCCESS);
3006 3007
3007 3008 fail:
3008 3009
3009 3010 /* Clean the pool */
3010 3011 for (i = 0; tokens[i] != NULL; i++) {
3011 3012 /* Get the buffer object */
3012 3013 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3013 3014 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3014 3015
3015 3016 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3016 3017 "fca_ub_alloc failed: Freed buffer=%p token=%x size=%x "
3017 3018 "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);
3018 3019
3019 3020 /* Free the actual buffer */
3020 3021 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3021 3022
3022 3023 /* Free the private area of the buffer object */
3023 3024 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3024 3025
3025 3026 tokens[i] = 0;
3026 3027 port->ub_count--;
3027 3028 }
3028 3029
3029 3030 /* Free the array of buffer objects in the pool */
3030 3031 kmem_free((caddr_t)new_pool->fc_ubufs,
3031 3032 (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));
3032 3033
3033 3034 /* Free the pool object */
3034 3035 kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
3035 3036
3036 3037 mutex_exit(&EMLXS_UB_LOCK);
3037 3038
3038 3039 return (result);
3039 3040
3040 3041 } /* emlxs_fca_ub_alloc() */
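
For FC_TYPE_EXTENDED_LS pools the routine above holds back half of the buffers for normal use and reserves the remainder for RSCN handling; integer division means the reserved share picks up any odd buffer. A trivial sketch of that split with an example count:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t count = 25;		/* example pool size */
	uint32_t free_bufs, free_resv;

	free_bufs = count / 2;		/* hold 50% for normal use */
	free_resv = count - free_bufs;	/* reserve the rest for RSCN use */

	(void) printf("count=%u free=%u reserved=%u\n",
	    count, free_bufs, free_resv);
	return (0);
}
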
3041 3042
3042 3043
3043 3044 static void
3044 3045 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
3045 3046 {
3046 3047 emlxs_hba_t *hba = HBA;
3047 3048 emlxs_ub_priv_t *ub_priv;
3048 3049 fc_packet_t *pkt;
3049 3050 ELS_PKT *els;
3050 3051 uint32_t sid;
3051 3052
3052 3053 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3053 3054
3054 3055 if (hba->state <= FC_LINK_DOWN) {
3055 3056 emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
3056 3057 return;
3057 3058 }
3058 3059
3059 3060 if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
3060 3061 sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
3061 3062 emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
3062 3063 return;
3063 3064 }
3064 3065
3065 3066 sid = LE_SWAP24_LO(ubp->ub_frame.s_id);
3066 3067
3067 3068 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
3068 3069 "%s dropped: sid=%x. Rejecting.",
3069 3070 emlxs_elscmd_xlate(ub_priv->cmd), sid);
3070 3071
3071 3072 pkt->pkt_tran_type = FC_PKT_OUTBOUND;
3072 3073 pkt->pkt_timeout = (2 * hba->fc_ratov);
3073 3074
3074 3075 if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
3075 3076 pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
3076 3077 pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
3077 3078 }
3078 3079
3079 3080 /* Build the fc header */
3080 3081 pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
3081 3082 pkt->pkt_cmd_fhdr.r_ctl =
3082 3083 R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
3083 3084 pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did);
3084 3085 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
3085 3086 pkt->pkt_cmd_fhdr.f_ctl =
3086 3087 F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
3087 3088 pkt->pkt_cmd_fhdr.seq_id = 0;
3088 3089 pkt->pkt_cmd_fhdr.df_ctl = 0;
3089 3090 pkt->pkt_cmd_fhdr.seq_cnt = 0;
3090 3091 pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
3091 3092 pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
3092 3093 pkt->pkt_cmd_fhdr.ro = 0;
3093 3094
3094 3095 /* Build the command */
3095 3096 els = (ELS_PKT *) pkt->pkt_cmd;
3096 3097 els->elsCode = 0x01;
3097 3098 els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
3098 3099 els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3099 3100 els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3100 3101 els->un.lsRjt.un.b.vendorUnique = 0x02;
3101 3102
3102 3103 /* Send the pkt later in another thread */
3103 3104 (void) emlxs_pkt_send(pkt, 0);
3104 3105
3105 3106 return;
3106 3107
3107 3108 } /* emlxs_ub_els_reject() */
3108 3109
3109 3110 extern int
3110 3111 emlxs_fca_ub_release(opaque_t fca_port_handle, uint32_t count,
3111 3112 uint64_t tokens[])
3112 3113 {
3113 3114 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3114 3115 emlxs_hba_t *hba = HBA;
3115 3116 fc_unsol_buf_t *ubp;
3116 3117 emlxs_ub_priv_t *ub_priv;
3117 3118 uint32_t i;
3118 3119 uint32_t time;
3119 3120 emlxs_unsol_buf_t *pool;
3120 3121
3121 3122 if (count == 0) {
3122 3123 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3123 3124 "fca_ub_release: Nothing to do. count=%d", count);
3124 3125
3125 3126 return (FC_SUCCESS);
3126 3127 }
3127 3128
3128 3129 if (!(port->flag & EMLXS_PORT_BOUND)) {
3129 3130 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3130 3131 "fca_ub_release failed: Port not bound. count=%d "
3131 3132 "token[0]=%p",
3132 3133 count, tokens[0]);
3133 3134
3134 3135 return (FC_UNBOUND);
3135 3136 }
3136 3137
3137 3138 mutex_enter(&EMLXS_UB_LOCK);
3138 3139
3139 3140 if (!port->ub_pool) {
3140 3141 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3141 3142 "fca_ub_release failed: No pools! count=%d token[0]=%p",
3142 3143 count, tokens[0]);
3143 3144
3144 3145 mutex_exit(&EMLXS_UB_LOCK);
3145 3146 return (FC_UB_BADTOKEN);
3146 3147 }
3147 3148
3148 3149 for (i = 0; i < count; i++) {
3149 3150 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3150 3151
3151 3152 if (!ubp) {
3152 3153 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3153 3154 "fca_ub_release failed: count=%d tokens[%d]=0",
3154 3155 count, i);
3155 3156
3156 3157 mutex_exit(&EMLXS_UB_LOCK);
3157 3158 return (FC_UB_BADTOKEN);
3158 3159 }
3159 3160
3160 3161 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3161 3162
3162 3163 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3163 3164 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3164 3165 "fca_ub_release failed: Dead buffer found. ubp=%p",
3165 3166 ubp);
3166 3167
3167 3168 mutex_exit(&EMLXS_UB_LOCK);
3168 3169 return (FC_UB_BADTOKEN);
3169 3170 }
3170 3171
3171 3172 if (ub_priv->flags == EMLXS_UB_FREE) {
3172 3173 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3173 3174 "fca_ub_release: Buffer already free! ubp=%p "
3174 3175 "token=%x",
3175 3176 ubp, ub_priv->token);
3176 3177
3177 3178 continue;
3178 3179 }
3179 3180
3180 3181 /* Check for dropped els buffer */
3181 3182 /* ULP will do this sometimes without sending a reply */
3182 3183 if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
3183 3184 !(ub_priv->flags & EMLXS_UB_REPLY)) {
3184 3185 emlxs_ub_els_reject(port, ubp);
3185 3186 }
3186 3187
3187 3188 /* Mark the buffer free */
3188 3189 ub_priv->flags = EMLXS_UB_FREE;
3189 3190 bzero(ubp->ub_buffer, ubp->ub_bufsize);
3190 3191
3191 3192 time = hba->timer_tics - ub_priv->time;
3192 3193 ub_priv->time = 0;
3193 3194 ub_priv->timeout = 0;
3194 3195
3195 3196 pool = ub_priv->pool;
3196 3197
3197 3198 if (ub_priv->flags & EMLXS_UB_RESV) {
3198 3199 pool->pool_free_resv++;
3199 3200 } else {
3200 3201 pool->pool_free++;
3201 3202 }
3202 3203
3203 3204 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3204 3205 "fca_ub_release: ubp=%p token=%x time=%d av=%d "
3205 3206 "(%d,%d,%d,%d)",
3206 3207 ubp, ub_priv->token, time, ub_priv->available,
3207 3208 pool->pool_nentries, pool->pool_available,
3208 3209 pool->pool_free, pool->pool_free_resv);
3209 3210
3210 3211 /* Check if pool can be destroyed now */
3211 3212 if ((pool->pool_available == 0) &&
3212 3213 (pool->pool_free + pool->pool_free_resv ==
3213 3214 pool->pool_nentries)) {
3214 3215 emlxs_ub_destroy(port, pool);
3215 3216 }
3216 3217 }
3217 3218
3218 3219 mutex_exit(&EMLXS_UB_LOCK);
3219 3220
3220 3221 return (FC_SUCCESS);
3221 3222
3222 3223 } /* emlxs_fca_ub_release() */
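
Both emlxs_fca_ub_release() and emlxs_fca_ub_free() destroy a pool only when ULP no longer holds any of its buffers and every entry has been returned to the free or reserved-free counts. A small sketch of that test (the helper is illustrative, not part of the driver):

#include <stdio.h>
#include <stdint.h>

static int
pool_can_destroy(uint32_t available, uint32_t nentries,
    uint32_t nfree, uint32_t nfree_resv)
{
	return (available == 0 && (nfree + nfree_resv) == nentries);
}

int
main(void)
{
	(void) printf("%d\n", pool_can_destroy(0, 8, 6, 2));	/* 1: destroy */
	(void) printf("%d\n", pool_can_destroy(2, 8, 4, 2));	/* 0: in use */
	return (0);
}
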
3223 3224
3224 3225
3225 3226 static int
3226 3227 emlxs_fca_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3227 3228 {
3228 3229 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3229 3230 emlxs_unsol_buf_t *pool;
3230 3231 fc_unsol_buf_t *ubp;
3231 3232 emlxs_ub_priv_t *ub_priv;
3232 3233 uint32_t i;
3233 3234
3234 3235 if (port->tgt_mode) {
3235 3236 return (FC_SUCCESS);
3236 3237 }
3237 3238
3238 3239 if (count == 0) {
3239 3240 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3240 3241 "fca_ub_free: Nothing to do. count=%d token[0]=%p", count,
3241 3242 tokens[0]);
3242 3243
3243 3244 return (FC_SUCCESS);
3244 3245 }
3245 3246
3246 3247 if (!(port->flag & EMLXS_PORT_BOUND)) {
3247 3248 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3248 3249 "fca_ub_free: Port not bound. count=%d token[0]=%p", count,
3249 3250 tokens[0]);
3250 3251
3251 3252 return (FC_SUCCESS);
3252 3253 }
3253 3254
3254 3255 mutex_enter(&EMLXS_UB_LOCK);
3255 3256
3256 3257 if (!port->ub_pool) {
3257 3258 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3258 3259 "fca_ub_free failed: No pools! count=%d token[0]=%p", count,
3259 3260 tokens[0]);
3260 3261
3261 3262 mutex_exit(&EMLXS_UB_LOCK);
3262 3263 return (FC_UB_BADTOKEN);
3263 3264 }
3264 3265
3265 3266 /* Process buffer list */
3266 3267 for (i = 0; i < count; i++) {
3267 3268 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3268 3269
3269 3270 if (!ubp) {
3270 3271 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3271 3272 "fca_ub_free failed: count=%d tokens[%d]=0", count,
3272 3273 i);
3273 3274
3274 3275 mutex_exit(&EMLXS_UB_LOCK);
3275 3276 return (FC_UB_BADTOKEN);
3276 3277 }
3277 3278
3278 3279 /* Mark buffer unavailable */
3279 3280 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3280 3281
3281 3282 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3282 3283 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3283 3284 "fca_ub_free failed: Dead buffer found. ubp=%p",
3284 3285 ubp);
3285 3286
3286 3287 mutex_exit(&EMLXS_UB_LOCK);
3287 3288 return (FC_UB_BADTOKEN);
3288 3289 }
3289 3290
3290 3291 ub_priv->available = 0;
3291 3292
3292 3293 /* Mark one less buffer available in the parent pool */
3293 3294 pool = ub_priv->pool;
3294 3295
3295 3296 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3296 3297 "fca_ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
3297 3298 ub_priv->token, pool->pool_nentries,
3298 3299 pool->pool_available - 1, pool->pool_free,
3299 3300 pool->pool_free_resv);
3300 3301
3301 3302 if (pool->pool_available) {
3302 3303 pool->pool_available--;
3303 3304
3304 3305 /* Check if pool can be destroyed */
3305 3306 if ((pool->pool_available == 0) &&
3306 3307 (pool->pool_free + pool->pool_free_resv ==
3307 3308 pool->pool_nentries)) {
3308 3309 emlxs_ub_destroy(port, pool);
3309 3310 }
3310 3311 }
3311 3312 }
3312 3313
3313 3314 mutex_exit(&EMLXS_UB_LOCK);
3314 3315
3315 3316 return (FC_SUCCESS);
3316 3317
3317 3318 } /* emlxs_fca_ub_free() */
3318 3319
3319 3320
3320 3321 /* EMLXS_UB_LOCK must be held when calling this routine */
3321 3322 extern void
3322 3323 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3323 3324 {
3324 3325 emlxs_hba_t *hba = HBA;
3325 3326 emlxs_unsol_buf_t *next;
3326 3327 emlxs_unsol_buf_t *prev;
3327 3328 fc_unsol_buf_t *ubp;
3328 3329 uint32_t i;
3329 3330
3330 3331 /* Remove the pool object from the pool list */
3331 3332 next = pool->pool_next;
3332 3333 prev = pool->pool_prev;
3333 3334
3334 3335 if (port->ub_pool == pool) {
3335 3336 port->ub_pool = next;
3336 3337 }
3337 3338
3338 3339 if (prev) {
3339 3340 prev->pool_next = next;
3340 3341 }
3341 3342
3342 3343 if (next) {
3343 3344 next->pool_prev = prev;
3344 3345 }
3345 3346
3346 3347 pool->pool_prev = NULL;
3347 3348 pool->pool_next = NULL;
3348 3349
3349 3350 /* Clear the post counts */
3350 3351 switch (pool->pool_type) {
3351 3352 case FC_TYPE_IS8802_SNAP:
3352 3353 port->ub_post[hba->channel_ip] -= pool->pool_nentries;
3353 3354 break;
3354 3355
3355 3356 case FC_TYPE_EXTENDED_LS:
3356 3357 port->ub_post[hba->channel_els] -= pool->pool_nentries;
3357 3358 break;
3358 3359
3359 3360 case FC_TYPE_FC_SERVICES:
3360 3361 port->ub_post[hba->channel_ct] -= pool->pool_nentries;
3361 3362 break;
3362 3363 }
3363 3364
3364 3365 /* Now free the pool memory */
3365 3366 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3366 3367 "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
3367 3368 pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3368 3369
3369 3370 /* Process the array of buffer objects in the pool */
3370 3371 for (i = 0; i < pool->pool_nentries; i++) {
3371 3372 /* Get the buffer object */
3372 3373 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3373 3374
3374 3375 /* Free the memory the buffer object represents */
3375 3376 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3376 3377
3377 3378 /* Free the private area of the buffer object */
3378 3379 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3379 3380 }
3380 3381
3381 3382 /* Free the array of buffer objects in the pool */
3382 3383 kmem_free((caddr_t)pool->fc_ubufs,
3383 3384 (sizeof (fc_unsol_buf_t)*pool->pool_nentries));
3384 3385
3385 3386 /* Free the pool object */
3386 3387 kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3387 3388
3388 3389 return;
3389 3390
3390 3391 } /* emlxs_ub_destroy() */
3391 3392
3392 3393
3393 3394 /*ARGSUSED*/
3394 3395 extern int
3395 3396 emlxs_fca_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
3396 3397 {
3397 3398 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3398 3399 emlxs_hba_t *hba = HBA;
3399 3400 emlxs_config_t *cfg = &CFG;
3400 3401
3401 3402 emlxs_buf_t *sbp;
3402 3403 NODELIST *nlp;
3403 3404 NODELIST *prev_nlp;
3404 3405 uint8_t channelno;
3405 3406 CHANNEL *cp;
3406 3407 clock_t timeout;
3407 3408 clock_t time;
3408 3409 int32_t pkt_ret;
3409 3410 IOCBQ *iocbq;
3410 3411 IOCBQ *next;
3411 3412 IOCBQ *prev;
3412 3413 uint32_t found;
3413 3414 uint32_t att_bit;
3414 3415 uint32_t pass = 0;
3415 3416
3416 3417 sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
3417 3418 iocbq = &sbp->iocbq;
3418 3419 nlp = (NODELIST *)sbp->node;
3419 3420 cp = (CHANNEL *)sbp->channel;
3420 3421 channelno = (cp) ? cp->channelno : 0;
3421 3422
3422 3423 if (!(port->flag & EMLXS_PORT_BOUND)) {
3423 3424 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3424 3425 "Port not bound.");
3425 3426 return (FC_UNBOUND);
3426 3427 }
3427 3428
3428 3429 if (!(hba->flag & FC_ONLINE_MODE)) {
3429 3430 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3430 3431 "Adapter offline.");
3431 3432 return (FC_OFFLINE);
3432 3433 }
3433 3434
3434 3435 /* ULP requires the aborted pkt to be completed */
3435 3436 /* back to ULP before returning from this call. */
3436 3437 /* Sun knows of problems with this call, so they suggested that we */
3437 3438 /* always return FC_FAILURE for this call until it is worked out. */
3438 3439
3439 3440 /* Check if pkt is no good */
3440 3441 if (!(sbp->pkt_flags & PACKET_VALID) ||
3441 3442 (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3442 3443 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3443 3444 "Bad sbp. flags=%x", sbp->pkt_flags);
3444 3445 return (FC_FAILURE);
3445 3446 }
3446 3447
3447 3448 /* Tag this now */
3448 3449 /* This will prevent any thread except ours from completing it */
3449 3450 mutex_enter(&sbp->mtx);
3450 3451
3451 3452 /* Check again if we still own this */
3452 3453 if (!(sbp->pkt_flags & PACKET_VALID) ||
3453 3454 (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3454 3455 mutex_exit(&sbp->mtx);
3455 3456 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3456 3457 "Bad sbp. flags=%x", sbp->pkt_flags);
3457 3458 return (FC_FAILURE);
3458 3459 }
3459 3460
3460 3461 /* Check if pkt is a real polled command */
3461 3462 if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
3462 3463 (sbp->pkt_flags & PACKET_POLLED)) {
3463 3464 mutex_exit(&sbp->mtx);
3464 3465
3465 3466 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3466 3467 "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp,
3467 3468 sbp->pkt_flags);
3468 3469 return (FC_FAILURE);
3469 3470 }
3470 3471
3471 3472 sbp->pkt_flags |= PACKET_POLLED;
3472 3473 sbp->pkt_flags |= PACKET_IN_ABORT;
3473 3474
3474 3475 if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
3475 3476 PACKET_IN_TIMEOUT)) {
3476 3477 mutex_exit(&sbp->mtx);
3477 3478
3478 3479 /* Do nothing, pkt already on its way out */
3479 3480 goto done;
3480 3481 }
3481 3482
3482 3483 mutex_exit(&sbp->mtx);
3483 3484
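/*
 * Abort strategy for the passes below: first search this node's
 * priority and normal TX queues and, if the IOCB is found, unlink it
 * and complete the packet locally with IOERR_ABORT_REQUESTED.  If the
 * I/O is already on the chip, issue an ABTS (link up) or close the
 * XRI (link down) and let the normal completion path finish it.  If
 * it is found on neither, delay and retry a few times before forcing
 * the completion.
 */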
3484 3485 begin:
3485 3486 pass++;
3486 3487
3487 3488 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3488 3489
3489 3490 if (sbp->pkt_flags & PACKET_IN_TXQ) {
3490 3491 /* Find it on the queue */
3491 3492 found = 0;
3492 3493 if (iocbq->flag & IOCB_PRIORITY) {
3493 3494 /* Search the priority queue */
3494 3495 prev = NULL;
3495 3496 next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first;
3496 3497
3497 3498 while (next) {
3498 3499 if (next == iocbq) {
3499 3500 /* Remove it */
3500 3501 if (prev) {
3501 3502 prev->next = iocbq->next;
3502 3503 }
3503 3504
3504 3505 if (nlp->nlp_ptx[channelno].q_last ==
3505 3506 (void *)iocbq) {
3506 3507 nlp->nlp_ptx[channelno].q_last =
3507 3508 (void *)prev;
3508 3509 }
3509 3510
3510 3511 if (nlp->nlp_ptx[channelno].q_first ==
3511 3512 (void *)iocbq) {
3512 3513 nlp->nlp_ptx[channelno].
3513 3514 q_first =
3514 3515 (void *)iocbq->next;
3515 3516 }
3516 3517
3517 3518 nlp->nlp_ptx[channelno].q_cnt--;
3518 3519 iocbq->next = NULL;
3519 3520 found = 1;
3520 3521 break;
3521 3522 }
3522 3523
3523 3524 prev = next;
3524 3525 next = next->next;
3525 3526 }
3526 3527 } else {
3527 3528 /* Search the normal queue */
3528 3529 prev = NULL;
3529 3530 next = (IOCBQ *) nlp->nlp_tx[channelno].q_first;
3530 3531
3531 3532 while (next) {
3532 3533 if (next == iocbq) {
3533 3534 /* Remove it */
3534 3535 if (prev) {
3535 3536 prev->next = iocbq->next;
3536 3537 }
3537 3538
3538 3539 if (nlp->nlp_tx[channelno].q_last ==
3539 3540 (void *)iocbq) {
3540 3541 nlp->nlp_tx[channelno].q_last =
3541 3542 (void *)prev;
3542 3543 }
3543 3544
3544 3545 if (nlp->nlp_tx[channelno].q_first ==
3545 3546 (void *)iocbq) {
3546 3547 nlp->nlp_tx[channelno].q_first =
3547 3548 (void *)iocbq->next;
3548 3549 }
3549 3550
3550 3551 nlp->nlp_tx[channelno].q_cnt--;
3551 3552 iocbq->next = NULL;
3552 3553 found = 1;
3553 3554 break;
3554 3555 }
3555 3556
3556 3557 prev = next;
3557 3558 next = (IOCBQ *) next->next;
3558 3559 }
3559 3560 }
3560 3561
3561 3562 if (!found) {
3562 3563 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3563 3564 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3564 3565 "I/O not found in driver. sbp=%p flags=%x", sbp,
3565 3566 sbp->pkt_flags);
3566 3567 goto done;
3567 3568 }
3568 3569
3569 3570 /* Check if node still needs servicing */
3570 3571 if ((nlp->nlp_ptx[channelno].q_first) ||
3571 3572 (nlp->nlp_tx[channelno].q_first &&
3572 3573 !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3573 3574
3574 3575 /*
3575 3576 * If this is the base node,
3576 3577 * then don't shift the pointers
3577 3578 */
3578 3579 /* We want to drain the base node before moving on */
3579 3580 if (!nlp->nlp_base) {
3580 3581 /* Just shift channel queue */
3581 3582 /* pointers to next node */
3582 3583 cp->nodeq.q_last = (void *) nlp;
3583 3584 cp->nodeq.q_first = nlp->nlp_next[channelno];
3584 3585 }
3585 3586 } else {
3586 3587 /* Remove node from channel queue */
3587 3588
3588 3589 /* If this is the only node on list */
3589 3590 if (cp->nodeq.q_first == (void *)nlp &&
3590 3591 cp->nodeq.q_last == (void *)nlp) {
3591 3592 cp->nodeq.q_last = NULL;
3592 3593 cp->nodeq.q_first = NULL;
3593 3594 cp->nodeq.q_cnt = 0;
3594 3595 } else if (cp->nodeq.q_first == (void *)nlp) {
3595 3596 cp->nodeq.q_first = nlp->nlp_next[channelno];
3596 3597 ((NODELIST *) cp->nodeq.q_last)->
3597 3598 nlp_next[channelno] = cp->nodeq.q_first;
3598 3599 cp->nodeq.q_cnt--;
3599 3600 } else {
3600 3601 /*
3601 3602 * This is a little more difficult: find the
3602 3603 * previous node in the circular channel queue
3603 3604 */
3604 3605 prev_nlp = nlp;
3605 3606 while (prev_nlp->nlp_next[channelno] != nlp) {
3606 3607 prev_nlp = prev_nlp->
3607 3608 nlp_next[channelno];
3608 3609 }
3609 3610
3610 3611 prev_nlp->nlp_next[channelno] =
3611 3612 nlp->nlp_next[channelno];
3612 3613
3613 3614 if (cp->nodeq.q_last == (void *)nlp) {
3614 3615 cp->nodeq.q_last = (void *)prev_nlp;
3615 3616 }
3616 3617 cp->nodeq.q_cnt--;
3617 3618
3618 3619 }
3619 3620
3620 3621 /* Clear node */
3621 3622 nlp->nlp_next[channelno] = NULL;
3622 3623 }
3623 3624
3624 3625 /* Free the ULPIOTAG and the bmp */
3625 3626 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3626 3627 emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1);
3627 3628 } else {
3628 3629 (void) emlxs_unregister_pkt(cp, sbp->iotag, 1);
3629 3630 }
3630 3631
3631 3632
3632 3633 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3633 3634
3634 3635 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3635 3636 IOERR_ABORT_REQUESTED, 1);
3636 3637
3637 3638 goto done;
3638 3639 }
3639 3640
3640 3641 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3641 3642
3642 3643
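/*
 * The I/O may already be on the adapter.  With the link up an ABTS
 * abort is issued; otherwise the XRI is simply closed.  sbp->ticks
 * sets a deadline in driver timer ticks (4*fc_ratov+10 or 30),
 * presumably so the heartbeat timer can reclaim the I/O if the abort
 * itself never completes.
 */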
3643 3644 /* Check the chip queue */
3644 3645 mutex_enter(&EMLXS_FCTAB_LOCK);
3645 3646
3646 3647 if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3647 3648 !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
3648 3649 (sbp == hba->fc_table[sbp->iotag])) {
3649 3650
3650 3651 /* Create the abort IOCB */
3651 3652 if (hba->state >= FC_LINK_UP) {
3652 3653 iocbq =
3653 3654 emlxs_create_abort_xri_cn(port, sbp->node,
3654 3655 sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);
3655 3656
3656 3657 mutex_enter(&sbp->mtx);
3657 3658 sbp->pkt_flags |= PACKET_XRI_CLOSED;
3658 3659 sbp->ticks =
3659 3660 hba->timer_tics + (4 * hba->fc_ratov) + 10;
3660 3661 sbp->abort_attempts++;
3661 3662 mutex_exit(&sbp->mtx);
3662 3663 } else {
3663 3664 iocbq =
3664 3665 emlxs_create_close_xri_cn(port, sbp->node,
3665 3666 sbp->iotag, cp);
3666 3667
3667 3668 mutex_enter(&sbp->mtx);
3668 3669 sbp->pkt_flags |= PACKET_XRI_CLOSED;
3669 3670 sbp->ticks = hba->timer_tics + 30;
3670 3671 sbp->abort_attempts++;
3671 3672 mutex_exit(&sbp->mtx);
3672 3673 }
3673 3674
3674 3675 mutex_exit(&EMLXS_FCTAB_LOCK);
3675 3676
3676 3677 /* Send this iocbq */
3677 3678 if (iocbq) {
3678 3679 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
3679 3680 iocbq = NULL;
3680 3681 }
3681 3682
3682 3683 goto done;
3683 3684 }
3684 3685
3685 3686 mutex_exit(&EMLXS_FCTAB_LOCK);
3686 3687
3687 3688 /* Pkt was not on any queues */
3688 3689
3689 3690 /* Check again if we still own this */
3690 3691 if (!(sbp->pkt_flags & PACKET_VALID) ||
3691 3692 (sbp->pkt_flags &
3692 3693 (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3693 3694 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3694 3695 goto done;
3695 3696 }
3696 3697
3697 3698 if (!sleep) {
3698 3699 return (FC_FAILURE);
3699 3700 }
3700 3701
3701 3702 /* Apparently the pkt was not found. Let's delay and try again */
3702 3703 if (pass < 5) {
3703 3704 delay(drv_usectohz(5000000)); /* 5 seconds */
3704 3705
3705 3706 /* Check again if we still own this */
3706 3707 if (!(sbp->pkt_flags & PACKET_VALID) ||
3707 3708 (sbp->pkt_flags &
3708 3709 (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3709 3710 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3710 3711 goto done;
3711 3712 }
3712 3713
3713 3714 goto begin;
3714 3715 }
3715 3716
3716 3717 force_it:
3717 3718
3718 3719 /* Force the completion now */
3719 3720 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3720 3721 "Abort: Completing an IO that's not outstanding: %x", sbp->iotag);
3721 3722
3722 3723 /* Now complete it */
3723 3724 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
3724 3725 1);
3725 3726
3726 3727 done:
3727 3728
3728 3729 /* Now wait for the pkt to complete */
3729 3730 if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3730 3731 /* Set thread timeout */
3731 3732 timeout = emlxs_timeout(hba, 30);
3732 3733
3733 3734 /* Check for panic situation */
3734 3735 if (ddi_in_panic()) {
3735 3736
3736 3737 /*
3737 3738 * In panic situations there will be one thread with no
3738 3739 * interrupts (hard or soft) and no timers
3739 3740 */
3740 3741
3741 3742 /*
3742 3743 * We must manually poll everything in this thread
3743 3744 * to keep the driver going.
3744 3745 */
3745 3746
3746 3747 cp = (CHANNEL *)sbp->channel;
3747 3748 switch (cp->channelno) {
3748 3749 case FC_FCP_RING:
3749 3750 att_bit = HA_R0ATT;
3750 3751 break;
3751 3752
3752 3753 case FC_IP_RING:
3753 3754 att_bit = HA_R1ATT;
3754 3755 break;
3755 3756
3756 3757 case FC_ELS_RING:
3757 3758 att_bit = HA_R2ATT;
3758 3759 break;
3759 3760
3760 3761 case FC_CT_RING:
3761 3762 att_bit = HA_R3ATT;
3762 3763 break;
3763 3764 }
3764 3765
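/*
 * timeout comes from emlxs_timeout(), which appears to return an
 * lbolt-based deadline, and time is read via drv_getparm(LBOLT), so
 * the loop below is a simple monotonic deadline check that needs no
 * interrupts or callouts.
 */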
3765 3766 /* Keep polling the chip until our IO is completed */
3766 3767 (void) drv_getparm(LBOLT, &time);
3767 3768 while ((time < timeout) &&
3768 3769 !(sbp->pkt_flags & PACKET_COMPLETED)) {
3769 3770 EMLXS_SLI_POLL_INTR(hba, att_bit);
3770 3771 (void) drv_getparm(LBOLT, &time);
3771 3772 }
3772 3773 } else {
3773 3774 /* Wait for IO completion or timeout */
3774 3775 mutex_enter(&EMLXS_PKT_LOCK);
3775 3776 pkt_ret = 0;
3776 3777 while ((pkt_ret != -1) &&
3777 3778 !(sbp->pkt_flags & PACKET_COMPLETED)) {
3778 3779 pkt_ret =
3779 3780 cv_timedwait(&EMLXS_PKT_CV,
3780 3781 &EMLXS_PKT_LOCK, timeout);
3781 3782 }
3782 3783 mutex_exit(&EMLXS_PKT_LOCK);
3783 3784 }
3784 3785
3785 3786 /* Check if a timeout occurred. This is not good. */
3786 3787 /* Something happened to our IO. */
3787 3788 if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3788 3789 /* Force the completion now */
3789 3790 goto force_it;
3790 3791 }
3791 3792 }
3792 3793 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
3793 3794 emlxs_unswap_pkt(sbp);
3794 3795 #endif /* EMLXS_MODREV2X */
3795 3796
3796 3797 /* Check again if we still own this */
3797 3798 if ((sbp->pkt_flags & PACKET_VALID) &&
3798 3799 !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3799 3800 mutex_enter(&sbp->mtx);
3800 3801 if ((sbp->pkt_flags & PACKET_VALID) &&
3801 3802 !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3802 3803 sbp->pkt_flags |= PACKET_ULP_OWNED;
3803 3804 }
3804 3805 mutex_exit(&sbp->mtx);
3805 3806 }
3806 3807
3807 3808 #ifdef ULP_PATCH5
3808 3809 if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) {
3809 3810 return (FC_FAILURE);
3810 3811 }
3811 3812 #endif /* ULP_PATCH5 */
3812 3813
3813 3814 return (FC_SUCCESS);
3814 3815
3815 3816 } /* emlxs_fca_pkt_abort() */
3816 3817
3817 3818
3818 3819 static void
3819 3820 emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip)
3820 3821 {
3821 3822 emlxs_port_t *port = &PPORT;
3822 3823 fc_packet_t *pkt;
3823 3824 emlxs_buf_t *sbp;
3824 3825 uint32_t i;
3825 3826 uint32_t flg;
3826 3827 uint32_t rc;
3827 3828 uint32_t txcnt;
3828 3829 uint32_t chipcnt;
3829 3830
3830 3831 txcnt = 0;
3831 3832 chipcnt = 0;
3832 3833
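/*
 * Note: EMLXS_FCTAB_LOCK is dropped around each emlxs_fca_pkt_abort()
 * call, so fc_table may change between iterations and an sbp captured
 * here can already be completing by the time the abort is attempted.
 */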
3833 3834 mutex_enter(&EMLXS_FCTAB_LOCK);
3834 3835 for (i = 0; i < hba->max_iotag; i++) {
3835 3836 sbp = hba->fc_table[i];
3836 3837 if (sbp == NULL || sbp == STALE_PACKET) {
3837 3838 continue;
3838 3839 }
3839 3840 flg = (sbp->pkt_flags & PACKET_IN_CHIPQ);
3840 3841 pkt = PRIV2PKT(sbp);
3841 3842 mutex_exit(&EMLXS_FCTAB_LOCK);
3842 3843 rc = emlxs_fca_pkt_abort(port, pkt, 0);
3843 3844 if (rc == FC_SUCCESS) {
3844 3845 if (flg) {
3845 3846 chipcnt++;
3846 3847 } else {
3847 3848 txcnt++;
3848 3849 }
3849 3850 }
3850 3851 mutex_enter(&EMLXS_FCTAB_LOCK);
3851 3852 }
3852 3853 mutex_exit(&EMLXS_FCTAB_LOCK);
3853 3854 *tx = txcnt;
3854 3855 *chip = chipcnt;
3855 3856 } /* emlxs_abort_all() */
3856 3857
3857 3858
3858 3859 extern int32_t
3859 3860 emlxs_reset(emlxs_port_t *port, uint32_t cmd)
3860 3861 {
3861 3862 emlxs_hba_t *hba = HBA;
3862 3863 int rval;
3863 3864 int ret;
3864 3865 clock_t timeout;
3865 3866
3866 3867 switch (cmd) {
3867 3868 case FC_FCA_LINK_RESET:
3868 3869
3869 3870 if (!(hba->flag & FC_ONLINE_MODE) ||
3870 3871 (hba->state <= FC_LINK_DOWN)) {
3871 3872 return (FC_SUCCESS);
3872 3873 }
3873 3874
3874 3875 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3875 3876 "Resetting Link.");
3876 3877
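/*
 * Flag that a link-up event is expected, reset the link, then block
 * on EMLXS_LINKUP_CV until the link-up handler signals or the
 * 60-second deadline expires (cv_timedwait() returns -1 on timeout).
 */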
3877 3878 mutex_enter(&EMLXS_LINKUP_LOCK);
3878 3879 hba->linkup_wait_flag = TRUE;
3879 3880 mutex_exit(&EMLXS_LINKUP_LOCK);
3880 3881
3881 3882 if (emlxs_reset_link(hba, 1, 1)) {
3882 3883 mutex_enter(&EMLXS_LINKUP_LOCK);
3883 3884 hba->linkup_wait_flag = FALSE;
3884 3885 mutex_exit(&EMLXS_LINKUP_LOCK);
3885 3886
3886 3887 return (FC_FAILURE);
3887 3888 }
3888 3889
3889 3890 mutex_enter(&EMLXS_LINKUP_LOCK);
3890 3891 timeout = emlxs_timeout(hba, 60);
3891 3892 ret = 0;
3892 3893 while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
3893 3894 ret =
3894 3895 cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
3895 3896 timeout);
3896 3897 }
3897 3898
3898 3899 hba->linkup_wait_flag = FALSE;
3899 3900 mutex_exit(&EMLXS_LINKUP_LOCK);
3900 3901
3901 3902 if (ret == -1) {
3902 3903 return (FC_FAILURE);
3903 3904 }
3904 3905
3905 3906 return (FC_SUCCESS);
3906 3907
3907 3908 case FC_FCA_CORE:
3908 3909 #ifdef DUMP_SUPPORT
3909 3910 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3910 3911 "Dumping Core.");
3911 3912
3912 3913 /* Schedule a USER dump */
3913 3914 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
3914 3915
3915 3916 /* Wait for dump to complete */
3916 3917 emlxs_dump_wait(hba);
3917 3918
3918 3919 return (FC_SUCCESS);
3919 3920 #endif /* DUMP_SUPPORT */
3920 3921
3921 3922 case FC_FCA_RESET:
3922 3923 case FC_FCA_RESET_CORE:
3923 3924
3924 3925 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3925 3926 "Resetting Adapter.");
3926 3927
3927 3928 rval = FC_SUCCESS;
3928 3929
3929 3930 if (emlxs_offline(hba) == 0) {
3930 3931 (void) emlxs_online(hba);
3931 3932 } else {
3932 3933 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3933 3934 "Adapter reset failed. Device busy.");
3934 3935
3935 3936 rval = FC_DEVICE_BUSY;
3936 3937 }
3937 3938
3938 3939 return (rval);
3939 3940
3940 3941 default:
3941 3942 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3942 3943 "emlxs_reset: Unknown command. cmd=%x", cmd);
3943 3944
3944 3945 break;
3945 3946 }
3946 3947
3947 3948 return (FC_FAILURE);
3948 3949
3949 3950 } /* emlxs_reset() */
3950 3951
3951 3952
3952 3953 extern int32_t
3953 3954 emlxs_fca_reset(opaque_t fca_port_handle, uint32_t cmd)
3954 3955 {
3955 3956 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3956 3957 emlxs_hba_t *hba = HBA;
3957 3958 int32_t rval;
3958 3959
3959 3960 if (!(port->flag & EMLXS_PORT_BOUND)) {
3960 3961 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3961 3962 "fca_reset: Port not bound.");
3962 3963
3963 3964 return (FC_UNBOUND);
3964 3965 }
3965 3966
3966 3967 switch (cmd) {
3967 3968 case FC_FCA_LINK_RESET:
3968 3969 if (hba->fw_flag & FW_UPDATE_NEEDED) {
3969 3970 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3970 3971 "fca_reset: FC_FCA_LINK_RESET -> FC_FCA_RESET");
3971 3972 cmd = FC_FCA_RESET;
3972 3973 } else {
3973 3974 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3974 3975 "fca_reset: FC_FCA_LINK_RESET");
3975 3976 }
3976 3977 break;
3977 3978
3978 3979 case FC_FCA_CORE:
3979 3980 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3980 3981 "fca_reset: FC_FCA_CORE");
3981 3982 break;
3982 3983
3983 3984 case FC_FCA_RESET:
3984 3985 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3985 3986 "fca_reset: FC_FCA_RESET");
3986 3987 break;
3987 3988
3988 3989 case FC_FCA_RESET_CORE:
3989 3990 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3990 3991 "fca_reset: FC_FCA_RESET_CORE");
3991 3992 break;
3992 3993
3993 3994 default:
3994 3995 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3995 3996 "fca_reset: Unknown command. cmd=%x", cmd);
3996 3997 return (FC_FAILURE);
3997 3998 }
3998 3999
3999 4000 if (hba->fw_flag & FW_UPDATE_NEEDED) {
4000 4001 hba->fw_flag |= FW_UPDATE_KERNEL;
4001 4002 }
4002 4003
4003 4004 rval = emlxs_reset(port, cmd);
4004 4005
4005 4006 return (rval);
4006 4007
4007 4008 } /* emlxs_fca_reset() */
4008 4009
4009 4010
4010 4011 extern int
4011 4012 emlxs_fca_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
4012 4013 {
4013 4014 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
4014 4015 emlxs_hba_t *hba = HBA;
4015 4016 int32_t ret;
4016 4017 emlxs_vpd_t *vpd = &VPD;
4017 4018
4018 4019
4019 4020 ret = FC_SUCCESS;
4020 4021
4021 4022 if (!(port->flag & EMLXS_PORT_BOUND)) {
4022 4023 return (FC_UNBOUND);
4023 4024 }
4024 4025
4025 4026
4026 4027 #ifdef IDLE_TIMER
4027 4028 emlxs_pm_busy_component(hba);
4028 4029 #endif /* IDLE_TIMER */
4029 4030
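/*
 * pm_cmd_code carries either a standard FC_PORT_* request from the
 * FCA transport or, under FC_PORT_DIAG, one of the driver-private
 * EMLXS_* subcommands (parameter, log, diagnostic and flash access).
 */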
4030 4031 switch (pm->pm_cmd_code) {
4031 4032
4032 4033 case FC_PORT_GET_FW_REV:
4033 4034 {
4034 4035 char buffer[128];
4035 4036
4036 4037 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4037 4038 "fca_port_manage: FC_PORT_GET_FW_REV");
4038 4039
4039 4040 (void) sprintf(buffer, "%s %s", hba->model_info.model,
4040 4041 vpd->fw_version);
4041 4042 bzero(pm->pm_data_buf, pm->pm_data_len);
4042 4043
4043 4044 if (pm->pm_data_len < strlen(buffer) + 1) {
4044 4045 ret = FC_NOMEM;
4045 4046
4046 4047 break;
4047 4048 }
4048 4049
4049 4050 (void) strcpy(pm->pm_data_buf, buffer);
4050 4051 break;
4051 4052 }
4052 4053
4053 4054 case FC_PORT_GET_FCODE_REV:
4054 4055 {
4055 4056 char buffer[128];
4056 4057
4057 4058 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4058 4059 "fca_port_manage: FC_PORT_GET_FCODE_REV");
4059 4060
4060 4061 /* Force update here just to be sure */
4061 4062 emlxs_get_fcode_version(hba);
4062 4063
4063 4064 (void) sprintf(buffer, "%s %s", hba->model_info.model,
4064 4065 vpd->fcode_version);
4065 4066 bzero(pm->pm_data_buf, pm->pm_data_len);
4066 4067
4067 4068 if (pm->pm_data_len < strlen(buffer) + 1) {
4068 4069 ret = FC_NOMEM;
4069 4070 break;
4070 4071 }
4071 4072
4072 4073 (void) strcpy(pm->pm_data_buf, buffer);
4073 4074 break;
4074 4075 }
4075 4076
4076 4077 case FC_PORT_GET_DUMP_SIZE:
4077 4078 {
4078 4079 #ifdef DUMP_SUPPORT
4079 4080 uint32_t dump_size = 0;
4080 4081
4081 4082 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4082 4083 "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
4083 4084
4084 4085 if (pm->pm_data_len < sizeof (uint32_t)) {
4085 4086 ret = FC_NOMEM;
4086 4087 break;
4087 4088 }
4088 4089
4089 4090 (void) emlxs_get_dump(hba, NULL, &dump_size);
4090 4091
4091 4092 *((uint32_t *)pm->pm_data_buf) = dump_size;
4092 4093
4093 4094 #else
4094 4095 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4095 4096 "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported.");
4096 4097
4097 4098 #endif /* DUMP_SUPPORT */
4098 4099
4099 4100 break;
4100 4101 }
4101 4102
4102 4103 case FC_PORT_GET_DUMP:
4103 4104 {
4104 4105 #ifdef DUMP_SUPPORT
4105 4106 uint32_t dump_size = 0;
4106 4107
4107 4108 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4108 4109 "fca_port_manage: FC_PORT_GET_DUMP");
4109 4110
4110 4111 (void) emlxs_get_dump(hba, NULL, &dump_size);
4111 4112
4112 4113 if (pm->pm_data_len < dump_size) {
4113 4114 ret = FC_NOMEM;
4114 4115 break;
4115 4116 }
4116 4117
4117 4118 (void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf,
4118 4119 (uint32_t *)&dump_size);
4119 4120 #else
4120 4121 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4121 4122 "fca_port_manage: FC_PORT_GET_DUMP unsupported.");
4122 4123
4123 4124 #endif /* DUMP_SUPPORT */
4124 4125
4125 4126 break;
4126 4127 }
4127 4128
4128 4129 case FC_PORT_FORCE_DUMP:
4129 4130 {
4130 4131 #ifdef DUMP_SUPPORT
4131 4132 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4132 4133 "fca_port_manage: FC_PORT_FORCE_DUMP");
4133 4134
4134 4135 /* Schedule a USER dump */
4135 4136 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
4136 4137
4137 4138 /* Wait for dump to complete */
4138 4139 emlxs_dump_wait(hba);
4139 4140 #else
4140 4141 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4141 4142 "fca_port_manage: FC_PORT_FORCE_DUMP unsupported.");
4142 4143
4143 4144 #endif /* DUMP_SUPPORT */
4144 4145 break;
4145 4146 }
4146 4147
4147 4148 case FC_PORT_LINK_STATE:
4148 4149 {
4149 4150 uint32_t *link_state;
4150 4151
4151 4152 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4152 4153 "fca_port_manage: FC_PORT_LINK_STATE");
4153 4154
4154 4155 if (pm->pm_stat_len != sizeof (*link_state)) {
4155 4156 ret = FC_NOMEM;
4156 4157 break;
4157 4158 }
4158 4159
4159 4160 if (pm->pm_cmd_buf != NULL) {
4160 4161 /*
4161 4162 * Can't look beyond the FCA port.
4162 4163 */
4163 4164 ret = FC_INVALID_REQUEST;
4164 4165 break;
4165 4166 }
4166 4167
4167 4168 link_state = (uint32_t *)pm->pm_stat_buf;
4168 4169
4169 4170 /* Set the state */
4170 4171 if (hba->state >= FC_LINK_UP) {
4171 4172 /* Check for loop topology */
4172 4173 if (hba->topology == TOPOLOGY_LOOP) {
4173 4174 *link_state = FC_STATE_LOOP;
4174 4175 } else {
4175 4176 *link_state = FC_STATE_ONLINE;
4176 4177 }
4177 4178
4178 4179 /* Set the link speed */
4179 4180 switch (hba->linkspeed) {
4180 4181 case LA_2GHZ_LINK:
4181 4182 *link_state |= FC_STATE_2GBIT_SPEED;
4182 4183 break;
4183 4184 case LA_4GHZ_LINK:
4184 4185 *link_state |= FC_STATE_4GBIT_SPEED;
4185 4186 break;
4186 4187 case LA_8GHZ_LINK:
4187 4188 *link_state |= FC_STATE_8GBIT_SPEED;
4188 4189 break;
4189 4190 case LA_10GHZ_LINK:
4190 4191 *link_state |= FC_STATE_10GBIT_SPEED;
4191 4192 break;
4192 4193 case LA_1GHZ_LINK:
4193 4194 default:
4194 4195 *link_state |= FC_STATE_1GBIT_SPEED;
4195 4196 break;
4196 4197 }
4197 4198 } else {
4198 4199 *link_state = FC_STATE_OFFLINE;
4199 4200 }
4200 4201
4201 4202 break;
4202 4203 }
4203 4204
4204 4205
4205 4206 case FC_PORT_ERR_STATS:
4206 4207 case FC_PORT_RLS:
4207 4208 {
4208 4209 MAILBOXQ *mbq;
4209 4210 MAILBOX *mb;
4210 4211 fc_rls_acc_t *bp;
4211 4212
4212 4213 if (!(hba->flag & FC_ONLINE_MODE)) {
4213 4214 return (FC_OFFLINE);
4214 4215 }
4215 4216 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4216 4217 "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
4217 4218
4218 4219 if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
4219 4220 ret = FC_NOMEM;
4220 4221 break;
4221 4222 }
4222 4223
4223 4224 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4224 4225 MEM_MBOX, 1)) == 0) {
4225 4226 ret = FC_NOMEM;
4226 4227 break;
4227 4228 }
4228 4229 mb = (MAILBOX *)mbq;
4229 4230
4230 4231 emlxs_mb_read_lnk_stat(hba, mbq);
4231 4232 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
4232 4233 != MBX_SUCCESS) {
4233 4234 ret = FC_PBUSY;
4234 4235 } else {
4235 4236 bp = (fc_rls_acc_t *)pm->pm_data_buf;
4236 4237
4237 4238 bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt;
4238 4239 bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
4239 4240 bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt;
4240 4241 bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt;
4241 4242 bp->rls_invalid_word =
4242 4243 mb->un.varRdLnk.invalidXmitWord;
4243 4244 bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
4244 4245 }
4245 4246
4246 4247 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4247 4248 break;
4248 4249 }
4249 4250
4250 4251 case FC_PORT_DOWNLOAD_FW:
4251 4252 if (!(hba->flag & FC_ONLINE_MODE)) {
4252 4253 return (FC_OFFLINE);
4253 4254 }
4254 4255 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4255 4256 "fca_port_manage: FC_PORT_DOWNLOAD_FW");
4256 4257 ret = emlxs_fw_download(hba, pm->pm_data_buf,
4257 4258 pm->pm_data_len, 1);
4258 4259 break;
4259 4260
4260 4261 case FC_PORT_DOWNLOAD_FCODE:
4261 4262 if (!(hba->flag & FC_ONLINE_MODE)) {
4262 4263 return (FC_OFFLINE);
4263 4264 }
4264 4265 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4265 4266 "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
4266 4267 ret = emlxs_fw_download(hba, pm->pm_data_buf,
4267 4268 pm->pm_data_len, 1);
4268 4269 break;
4269 4270
4270 4271 case FC_PORT_DIAG:
4271 4272 {
4272 4273 uint32_t errno = 0;
4273 4274 uint32_t did = 0;
4274 4275 uint32_t pattern = 0;
4275 4276
4276 4277 switch (pm->pm_cmd_flags) {
4277 4278 case EMLXS_DIAG_BIU:
4278 4279
4279 4280 if (!(hba->flag & FC_ONLINE_MODE)) {
4280 4281 return (FC_OFFLINE);
4281 4282 }
4282 4283 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4283 4284 "fca_port_manage: EMLXS_DIAG_BIU");
4284 4285
4285 4286 if (pm->pm_data_len) {
4286 4287 pattern = *((uint32_t *)pm->pm_data_buf);
4287 4288 }
4288 4289
4289 4290 errno = emlxs_diag_biu_run(hba, pattern);
4290 4291
4291 4292 if (pm->pm_stat_len == sizeof (errno)) {
4292 4293 *(int *)pm->pm_stat_buf = errno;
4293 4294 }
4294 4295
4295 4296 break;
4296 4297
4297 4298
4298 4299 case EMLXS_DIAG_POST:
4299 4300
4300 4301 if (!(hba->flag & FC_ONLINE_MODE)) {
4301 4302 return (FC_OFFLINE);
4302 4303 }
4303 4304 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4304 4305 "fca_port_manage: EMLXS_DIAG_POST");
4305 4306
4306 4307 errno = emlxs_diag_post_run(hba);
4307 4308
4308 4309 if (pm->pm_stat_len == sizeof (errno)) {
4309 4310 *(int *)pm->pm_stat_buf = errno;
4310 4311 }
4311 4312
4312 4313 break;
4313 4314
4314 4315
4315 4316 case EMLXS_DIAG_ECHO:
4316 4317
4317 4318 if (!(hba->flag & FC_ONLINE_MODE)) {
4318 4319 return (FC_OFFLINE);
4319 4320 }
4320 4321 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4321 4322 "fca_port_manage: EMLXS_DIAG_ECHO");
4322 4323
4323 4324 if (pm->pm_cmd_len != sizeof (uint32_t)) {
4324 4325 ret = FC_INVALID_REQUEST;
4325 4326 break;
4326 4327 }
4327 4328
4328 4329 did = *((uint32_t *)pm->pm_cmd_buf);
4329 4330
4330 4331 if (pm->pm_data_len) {
4331 4332 pattern = *((uint32_t *)pm->pm_data_buf);
4332 4333 }
4333 4334
4334 4335 errno = emlxs_diag_echo_run(port, did, pattern);
4335 4336
4336 4337 if (pm->pm_stat_len == sizeof (errno)) {
4337 4338 *(int *)pm->pm_stat_buf = errno;
4338 4339 }
4339 4340
4340 4341 break;
4341 4342
4342 4343
4343 4344 case EMLXS_PARM_GET_NUM:
4344 4345 {
4345 4346 uint32_t *num;
4346 4347 emlxs_config_t *cfg;
4347 4348 uint32_t i;
4348 4349 uint32_t count;
4349 4350 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4350 4351 "fca_port_manage: EMLXS_PARM_GET_NUM");
4351 4352
4352 4353 if (pm->pm_stat_len < sizeof (uint32_t)) {
4353 4354 ret = FC_NOMEM;
4354 4355 break;
4355 4356 }
4356 4357
4357 4358 num = (uint32_t *)pm->pm_stat_buf;
4358 4359 count = 0;
4359 4360 cfg = &CFG;
4360 4361 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4361 4362 if (!(cfg->flags & PARM_HIDDEN)) {
4362 4363 count++;
4363 4364 }
4364 4365
4365 4366 }
4366 4367
4367 4368 *num = count;
4368 4369
4369 4370 break;
4370 4371 }
4371 4372
4372 4373 case EMLXS_PARM_GET_LIST:
4373 4374 {
4374 4375 emlxs_parm_t *parm;
4375 4376 emlxs_config_t *cfg;
4376 4377 uint32_t i;
4377 4378 uint32_t max_count;
4378 4379
4379 4380 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4380 4381 "fca_port_manage: EMLXS_PARM_GET_LIST");
4381 4382
4382 4383 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4383 4384 ret = FC_NOMEM;
4384 4385 break;
4385 4386 }
4386 4387
4387 4388 max_count = pm->pm_stat_len / sizeof (emlxs_parm_t);
4388 4389
4389 4390 parm = (emlxs_parm_t *)pm->pm_stat_buf;
4390 4391 cfg = &CFG;
4391 4392 for (i = 0; i < NUM_CFG_PARAM && max_count; i++,
4392 4393 cfg++) {
4393 4394 if (!(cfg->flags & PARM_HIDDEN)) {
4394 4395 (void) strcpy(parm->label, cfg->string);
4395 4396 parm->min = cfg->low;
4396 4397 parm->max = cfg->hi;
4397 4398 parm->def = cfg->def;
4398 4399 parm->current = cfg->current;
4399 4400 parm->flags = cfg->flags;
4400 4401 (void) strcpy(parm->help, cfg->help);
4401 4402 parm++;
4402 4403 max_count--;
4403 4404 }
4404 4405 }
4405 4406
4406 4407 break;
4407 4408 }
4408 4409
4409 4410 case EMLXS_PARM_GET:
4410 4411 {
4411 4412 emlxs_parm_t *parm_in;
4412 4413 emlxs_parm_t *parm_out;
4413 4414 emlxs_config_t *cfg;
4414 4415 uint32_t i;
4415 4416 uint32_t len;
4416 4417
4417 4418 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4418 4419 EMLXS_MSGF(EMLXS_CONTEXT,
4419 4420 &emlxs_sfs_debug_msg,
4420 4421 "fca_port_manage: EMLXS_PARM_GET. "
4421 4422 "inbuf too small.");
4422 4423
4423 4424 ret = FC_BADCMD;
4424 4425 break;
4425 4426 }
4426 4427
4427 4428 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4428 4429 EMLXS_MSGF(EMLXS_CONTEXT,
4429 4430 &emlxs_sfs_debug_msg,
4430 4431 "fca_port_manage: EMLXS_PARM_GET. "
4431 4432 "outbuf too small");
4432 4433
4433 4434 ret = FC_BADCMD;
4434 4435 break;
4435 4436 }
4436 4437
4437 4438 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4438 4439 parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4439 4440 len = strlen(parm_in->label);
4440 4441 cfg = &CFG;
4441 4442 ret = FC_BADOBJECT;
4442 4443
4443 4444 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4444 4445 "fca_port_manage: EMLXS_PARM_GET: %s",
4445 4446 parm_in->label);
4446 4447
4447 4448 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4448 4449 if (len == strlen(cfg->string) &&
4449 4450 (strcmp(parm_in->label,
4450 4451 cfg->string) == 0)) {
4451 4452 (void) strcpy(parm_out->label,
4452 4453 cfg->string);
4453 4454 parm_out->min = cfg->low;
4454 4455 parm_out->max = cfg->hi;
4455 4456 parm_out->def = cfg->def;
4456 4457 parm_out->current = cfg->current;
4457 4458 parm_out->flags = cfg->flags;
4458 4459 (void) strcpy(parm_out->help,
4459 4460 cfg->help);
4460 4461
4461 4462 ret = FC_SUCCESS;
4462 4463 break;
4463 4464 }
4464 4465 }
4465 4466
4466 4467 break;
4467 4468 }
4468 4469
4469 4470 case EMLXS_PARM_SET:
4470 4471 {
4471 4472 emlxs_parm_t *parm_in;
4472 4473 emlxs_parm_t *parm_out;
4473 4474 emlxs_config_t *cfg;
4474 4475 uint32_t i;
4475 4476 uint32_t len;
4476 4477
4477 4478 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4478 4479 EMLXS_MSGF(EMLXS_CONTEXT,
4479 4480 &emlxs_sfs_debug_msg,
4480 4481 "fca_port_manage: EMLXS_PARM_SET. "
4481 4482 "inbuf too small.");
4482 4483
4483 4484 ret = FC_BADCMD;
4484 4485 break;
4485 4486 }
4486 4487
4487 4488 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4488 4489 EMLXS_MSGF(EMLXS_CONTEXT,
4489 4490 &emlxs_sfs_debug_msg,
4490 4491 "fca_port_manage: EMLXS_PARM_SET. "
4491 4492 "outbuf too small");
4492 4493 ret = FC_BADCMD;
4493 4494 break;
4494 4495 }
4495 4496
4496 4497 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4497 4498 parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4498 4499 len = strlen(parm_in->label);
4499 4500 cfg = &CFG;
4500 4501 ret = FC_BADOBJECT;
4501 4502
4502 4503 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4503 4504 "fca_port_manage: EMLXS_PARM_SET: %s=0x%x,%d",
4504 4505 parm_in->label, parm_in->current,
4505 4506 parm_in->current);
4506 4507
4507 4508 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4508 4509 /* Find matching parameter string */
4509 4510 if (len == strlen(cfg->string) &&
4510 4511 (strcmp(parm_in->label,
4511 4512 cfg->string) == 0)) {
4512 4513 /* Attempt to update parameter */
4513 4514 if (emlxs_set_parm(hba, i,
4514 4515 parm_in->current) == FC_SUCCESS) {
4515 4516 (void) strcpy(parm_out->label,
4516 4517 cfg->string);
4517 4518 parm_out->min = cfg->low;
4518 4519 parm_out->max = cfg->hi;
4519 4520 parm_out->def = cfg->def;
4520 4521 parm_out->current =
4521 4522 cfg->current;
4522 4523 parm_out->flags = cfg->flags;
4523 4524 (void) strcpy(parm_out->help,
4524 4525 cfg->help);
4525 4526
4526 4527 ret = FC_SUCCESS;
4527 4528 }
4528 4529
4529 4530 break;
4530 4531 }
4531 4532 }
4532 4533
4533 4534 break;
4534 4535 }
4535 4536
4536 4537 case EMLXS_LOG_GET:
4537 4538 {
4538 4539 emlxs_log_req_t *req;
4539 4540 emlxs_log_resp_t *resp;
4540 4541 uint32_t len;
4541 4542
4542 4543 /* Check command size */
4543 4544 if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
4544 4545 ret = FC_BADCMD;
4545 4546 break;
4546 4547 }
4547 4548
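/*
 * The request names how many log entries to return; the response
 * buffer must hold the emlxs_log_resp_t header plus req->count
 * messages of MAX_LOG_MSG_LENGTH bytes each, as checked below.
 */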
4548 4549 /* Get the request */
4549 4550 req = (emlxs_log_req_t *)pm->pm_cmd_buf;
4550 4551
4551 4552 /* Calculate the response length from the request */
4552 4553 len = sizeof (emlxs_log_resp_t) +
4553 4554 (req->count * MAX_LOG_MSG_LENGTH);
4554 4555
4555 4556 /* Check the response buffer length */
4556 4557 if (pm->pm_stat_len < len) {
4557 4558 ret = FC_BADCMD;
4558 4559 break;
4559 4560 }
4560 4561
4561 4562 /* Get the response pointer */
4562 4563 resp = (emlxs_log_resp_t *)pm->pm_stat_buf;
4563 4564
4564 4565 /* Get the requested log entries */
4565 4566 (void) emlxs_msg_log_get(hba, req, resp);
4566 4567
4567 4568 ret = FC_SUCCESS;
4568 4569 break;
4569 4570 }
4570 4571
4571 4572 case EMLXS_GET_BOOT_REV:
4572 4573 {
4573 4574 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4574 4575 "fca_port_manage: EMLXS_GET_BOOT_REV");
4575 4576
4576 4577 if (pm->pm_stat_len < strlen(vpd->boot_version)) {
4577 4578 ret = FC_NOMEM;
4578 4579 break;
4579 4580 }
4580 4581
4581 4582 bzero(pm->pm_stat_buf, pm->pm_stat_len);
4582 4583 (void) sprintf(pm->pm_stat_buf, "%s %s",
4583 4584 hba->model_info.model, vpd->boot_version);
4584 4585
4585 4586 break;
4586 4587 }
4587 4588
4588 4589 case EMLXS_DOWNLOAD_BOOT:
4589 4590 if (!(hba->flag & FC_ONLINE_MODE)) {
4590 4591 return (FC_OFFLINE);
4591 4592 }
4592 4593 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4593 4594 "fca_port_manage: EMLXS_DOWNLOAD_BOOT");
4594 4595
4595 4596 ret = emlxs_fw_download(hba, pm->pm_data_buf,
4596 4597 pm->pm_data_len, 1);
4597 4598 break;
4598 4599
4599 4600 case EMLXS_DOWNLOAD_CFL:
4600 4601 {
4601 4602 uint32_t *buffer;
4602 4603 uint32_t region;
4603 4604 uint32_t length;
4604 4605
4605 4606 if (!(hba->flag & FC_ONLINE_MODE)) {
4606 4607 return (FC_OFFLINE);
4607 4608 }
4608 4609
4609 4610 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4610 4611 "fca_port_manage: EMLXS_DOWNLOAD_CFL");
4611 4612
4612 4613 /* Extract the region number from the first word. */
4613 4614 buffer = (uint32_t *)pm->pm_data_buf;
4614 4615 region = *buffer++;
4615 4616
4616 4617 /* Adjust the image length for the header word */
4617 4618 length = pm->pm_data_len - 4;
4618 4619
4619 4620 ret =
4620 4621 emlxs_cfl_download(hba, region, (caddr_t)buffer,
4621 4622 length);
4622 4623 break;
4623 4624 }
4624 4625
4625 4626 case EMLXS_VPD_GET:
4626 4627 {
4627 4628 emlxs_vpd_desc_t *vpd_out;
4628 4629
4629 4630 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4630 4631 "fca_port_manage: EMLXS_VPD_GET");
4631 4632
4632 4633 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) {
4633 4634 ret = FC_BADCMD;
4634 4635 break;
4635 4636 }
4636 4637
4637 4638 vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4638 4639 bzero(vpd_out, sizeof (emlxs_vpd_desc_t));
4639 4640
4640 4641 (void) strncpy(vpd_out->id, vpd->id,
4641 4642 sizeof (vpd_out->id));
4642 4643 (void) strncpy(vpd_out->part_num, vpd->part_num,
4643 4644 sizeof (vpd_out->part_num));
4644 4645 (void) strncpy(vpd_out->eng_change, vpd->eng_change,
4645 4646 sizeof (vpd_out->eng_change));
4646 4647 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4647 4648 sizeof (vpd_out->manufacturer));
4648 4649 (void) strncpy(vpd_out->serial_num, vpd->serial_num,
4649 4650 sizeof (vpd_out->serial_num));
4650 4651 (void) strncpy(vpd_out->model, vpd->model,
4651 4652 sizeof (vpd_out->model));
4652 4653 (void) strncpy(vpd_out->model_desc, vpd->model_desc,
4653 4654 sizeof (vpd_out->model_desc));
4654 4655 (void) strncpy(vpd_out->port_num, vpd->port_num,
4655 4656 sizeof (vpd_out->port_num));
4656 4657 (void) strncpy(vpd_out->prog_types, vpd->prog_types,
4657 4658 sizeof (vpd_out->prog_types));
4658 4659
4659 4660 ret = FC_SUCCESS;
4660 4661
4661 4662 break;
4662 4663 }
4663 4664
4664 4665 case EMLXS_GET_FCIO_REV:
4665 4666 {
4666 4667 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4667 4668 "fca_port_manage: EMLXS_GET_FCIO_REV");
4668 4669
4669 4670 if (pm->pm_stat_len < sizeof (uint32_t)) {
4670 4671 ret = FC_NOMEM;
4671 4672 break;
4672 4673 }
4673 4674
4674 4675 bzero(pm->pm_stat_buf, pm->pm_stat_len);
4675 4676 *(uint32_t *)pm->pm_stat_buf = FCIO_REV;
4676 4677
4677 4678 break;
4678 4679 }
4679 4680
4680 4681 case EMLXS_GET_DFC_REV:
4681 4682 {
4682 4683 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4683 4684 "fca_port_manage: EMLXS_GET_DFC_REV");
4684 4685
4685 4686 if (pm->pm_stat_len < sizeof (uint32_t)) {
4686 4687 ret = FC_NOMEM;
4687 4688 break;
4688 4689 }
4689 4690
4690 4691 bzero(pm->pm_stat_buf, pm->pm_stat_len);
4691 4692 *(uint32_t *)pm->pm_stat_buf = DFC_REV;
4692 4693
4693 4694 break;
4694 4695 }
4695 4696
4696 4697 case EMLXS_SET_BOOT_STATE:
4697 4698 case EMLXS_SET_BOOT_STATE_old:
4698 4699 {
4699 4700 uint32_t state;
4700 4701
4701 4702 if (!(hba->flag & FC_ONLINE_MODE)) {
4702 4703 return (FC_OFFLINE);
4703 4704 }
4704 4705 if (pm->pm_cmd_len < sizeof (uint32_t)) {
4705 4706 EMLXS_MSGF(EMLXS_CONTEXT,
4706 4707 &emlxs_sfs_debug_msg,
4707 4708 "fca_port_manage: EMLXS_SET_BOOT_STATE");
4708 4709 ret = FC_BADCMD;
4709 4710 break;
4710 4711 }
4711 4712
4712 4713 state = *(uint32_t *)pm->pm_cmd_buf;
4713 4714
4714 4715 if (state == 0) {
4715 4716 EMLXS_MSGF(EMLXS_CONTEXT,
4716 4717 &emlxs_sfs_debug_msg,
4717 4718 "fca_port_manage: EMLXS_SET_BOOT_STATE: "
4718 4719 "Disable");
4719 4720 ret = emlxs_boot_code_disable(hba);
4720 4721 } else {
4721 4722 EMLXS_MSGF(EMLXS_CONTEXT,
4722 4723 &emlxs_sfs_debug_msg,
4723 4724 "fca_port_manage: EMLXS_SET_BOOT_STATE: "
4724 4725 "Enable");
4725 4726 ret = emlxs_boot_code_enable(hba);
4726 4727 }
4727 4728
4728 4729 break;
4729 4730 }
4730 4731
4731 4732 case EMLXS_GET_BOOT_STATE:
4732 4733 case EMLXS_GET_BOOT_STATE_old:
4733 4734 {
4734 4735 if (!(hba->flag & FC_ONLINE_MODE)) {
4735 4736 return (FC_OFFLINE);
4736 4737 }
4737 4738 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4738 4739 "fca_port_manage: EMLXS_GET_BOOT_STATE");
4739 4740
4740 4741 if (pm->pm_stat_len < sizeof (uint32_t)) {
4741 4742 ret = FC_NOMEM;
4742 4743 break;
4743 4744 }
4744 4745 bzero(pm->pm_stat_buf, pm->pm_stat_len);
4745 4746
4746 4747 ret = emlxs_boot_code_state(hba);
4747 4748
4748 4749 if (ret == FC_SUCCESS) {
4749 4750 *(uint32_t *)pm->pm_stat_buf = 1;
4750 4751 ret = FC_SUCCESS;
4751 4752 } else if (ret == FC_FAILURE) {
4752 4753 ret = FC_SUCCESS;
4753 4754 }
4754 4755
4755 4756 break;
4756 4757 }
4757 4758
4758 4759 case EMLXS_HW_ERROR_TEST:
4759 4760 {
4760 4761 /*
4761 4762 * This command is used for simulating HW ERROR
4762 4763 * on SLI4 only.
4763 4764 */
4764 4765 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4765 4766 ret = FC_INVALID_REQUEST;
4766 4767 break;
4767 4768 }
4768 4769 hba->sli.sli4.flag |= EMLXS_SLI4_HW_ERROR;
4769 4770 break;
4770 4771 }
4771 4772
4772 4773 case EMLXS_MB_TIMEOUT_TEST:
4773 4774 {
4774 4775 if (!(hba->flag & FC_ONLINE_MODE)) {
4775 4776 return (FC_OFFLINE);
4776 4777 }
4777 4778
4778 4779 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4779 4780 "fca_port_manage: EMLXS_MB_TIMEOUT_TEST");
4780 4781
4781 4782 /* Trigger a mailbox timeout */
4782 4783 hba->mbox_timer = hba->timer_tics;
4783 4784
4784 4785 break;
4785 4786 }
4786 4787
4787 4788 case EMLXS_TEST_CODE:
4788 4789 {
4789 4790 uint32_t *cmd;
4790 4791
4791 4792 if (!(hba->flag & FC_ONLINE_MODE)) {
4792 4793 return (FC_OFFLINE);
4793 4794 }
4794 4795
4795 4796 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4796 4797 "fca_port_manage: EMLXS_TEST_CODE");
4797 4798
4798 4799 if (pm->pm_cmd_len < sizeof (uint32_t)) {
4799 4800 EMLXS_MSGF(EMLXS_CONTEXT,
4800 4801 &emlxs_sfs_debug_msg,
4801 4802 "fca_port_manage: EMLXS_TEST_CODE. "
4802 4803 "inbuf too small.");
4803 4804
4804 4805 ret = FC_BADCMD;
4805 4806 break;
4806 4807 }
4807 4808
4808 4809 cmd = (uint32_t *)pm->pm_cmd_buf;
4809 4810
4810 4811 ret = emlxs_test(hba, cmd[0],
4811 4812 (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]);
4812 4813
4813 4814 break;
4814 4815 }
4815 4816
4816 4817 case EMLXS_BAR_IO:
4817 4818 {
4818 4819 uint32_t *cmd;
4819 4820 uint32_t *datap;
4820 4821 uint32_t offset;
4821 4822 caddr_t addr;
4822 4823 uint32_t i;
4823 4824 uint32_t tx_cnt;
4824 4825 uint32_t chip_cnt;
4825 4826
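/*
 * cmd[0] selects the operation (2/3 BAR1/BAR2 read, 4/5 BAR1/BAR2
 * write, 6 bootstrap mailbox dump, 7 PCI register read, 8 abort all
 * I/Os), cmd[1] is the register offset and cmd[2] is the value for
 * writes.
 */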
4826 4827 cmd = (uint32_t *)pm->pm_cmd_buf;
4827 4828 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4828 4829 "fca_port_manage: EMLXS_BAR_IO %x %x %x",
4829 4830 cmd[0], cmd[1], cmd[2]);
4830 4831
4831 4832 offset = cmd[1];
4832 4833
4833 4834 ret = FC_SUCCESS;
4834 4835
4835 4836 switch (cmd[0]) {
4836 4837 case 2: /* bar1read */
4837 4838 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4838 4839 return (FC_BADCMD);
4839 4840 }
4840 4841
4841 4842 /* Registers in this range are invalid */
4842 4843 if ((offset >= 0x4C00) && (offset < 0x5000)) {
4843 4844 return (FC_BADCMD);
4844 4845 }
4845 4846 if ((offset >= 0x5800) || (offset & 0x3)) {
4846 4847 return (FC_BADCMD);
4847 4848 }
4848 4849 datap = (uint32_t *)pm->pm_stat_buf;
4849 4850
4850 4851 for (i = 0; i < pm->pm_stat_len;
4851 4852 i += sizeof (uint32_t)) {
4852 4853 if ((offset >= 0x4C00) &&
4853 4854 (offset < 0x5000)) {
4854 4855 pm->pm_stat_len = i;
4855 4856 break;
4856 4857 }
4857 4858 if (offset >= 0x5800) {
4858 4859 pm->pm_stat_len = i;
4859 4860 break;
4860 4861 }
4861 4862 addr = hba->sli.sli4.bar1_addr + offset;
4862 4863 *datap = READ_BAR1_REG(hba, addr);
4863 4864 datap++;
4864 4865 offset += sizeof (uint32_t);
4865 4866 }
4866 4867 #ifdef FMA_SUPPORT
4867 4868 /* Access handle validation */
4868 4869 EMLXS_CHK_ACC_HANDLE(hba,
4869 4870 hba->sli.sli4.bar1_acc_handle);
4870 4871 #endif /* FMA_SUPPORT */
4871 4872 break;
4872 4873 case 3: /* bar2read */
4873 4874 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4874 4875 return (FC_BADCMD);
4875 4876 }
4876 4877 if ((offset >= 0x1000) || (offset & 0x3)) {
4877 4878 return (FC_BADCMD);
4878 4879 }
4879 4880 datap = (uint32_t *)pm->pm_stat_buf;
4880 4881
4881 4882 for (i = 0; i < pm->pm_stat_len;
4882 4883 i += sizeof (uint32_t)) {
4883 4884 *datap = READ_BAR2_REG(hba,
4884 4885 hba->sli.sli4.bar2_addr + offset);
4885 4886 datap++;
4886 4887 offset += sizeof (uint32_t);
4887 4888 }
4888 4889 #ifdef FMA_SUPPORT
4889 4890 /* Access handle validation */
4890 4891 EMLXS_CHK_ACC_HANDLE(hba,
4891 4892 hba->sli.sli4.bar2_acc_handle);
4892 4893 #endif /* FMA_SUPPORT */
4893 4894 break;
4894 4895 case 4: /* bar1write */
4895 4896 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4896 4897 return (FC_BADCMD);
4897 4898 }
4898 4899 WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr +
4899 4900 offset, cmd[2]);
4900 4901 #ifdef FMA_SUPPORT
4901 4902 /* Access handle validation */
4902 4903 EMLXS_CHK_ACC_HANDLE(hba,
4903 4904 hba->sli.sli4.bar1_acc_handle);
4904 4905 #endif /* FMA_SUPPORT */
4905 4906 break;
4906 4907 case 5: /* bar2write */
4907 4908 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4908 4909 return (FC_BADCMD);
4909 4910 }
4910 4911 WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr +
4911 4912 offset, cmd[2]);
4912 4913 #ifdef FMA_SUPPORT
4913 4914 /* Access handle validation */
4914 4915 EMLXS_CHK_ACC_HANDLE(hba,
4915 4916 hba->sli.sli4.bar2_acc_handle);
4916 4917 #endif /* FMA_SUPPORT */
4917 4918 break;
4918 4919 case 6: /* dumpbsmbox */
4919 4920 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4920 4921 return (FC_BADCMD);
4921 4922 }
4922 4923 if (offset != 0) {
4923 4924 return (FC_BADCMD);
4924 4925 }
4925 4926
4926 4927 bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt,
4927 4928 (caddr_t)pm->pm_stat_buf, 256);
4928 4929 break;
4929 4930 case 7: /* pciread */
4930 4931 if ((offset >= 0x200) || (offset & 0x3)) {
4931 4932 return (FC_BADCMD);
4932 4933 }
4933 4934 datap = (uint32_t *)pm->pm_stat_buf;
4934 4935 for (i = 0; i < pm->pm_stat_len;
4935 4936 i += sizeof (uint32_t)) {
4936 4937 *datap = ddi_get32(hba->pci_acc_handle,
4937 4938 (uint32_t *)(hba->pci_addr +
4938 4939 offset));
4939 4940 datap++;
4940 4941 offset += sizeof (uint32_t);
4941 4942 }
4942 4943 #ifdef FMA_SUPPORT
4943 4944 /* Access handle validation */
4944 4945 EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
4945 4946 #endif /* FMA_SUPPORT */
4946 4947 break;
4947 4948 case 8: /* abortall */
4948 4949 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4949 4950 return (FC_BADCMD);
4950 4951 }
4951 4952 emlxs_abort_all(hba, &tx_cnt, &chip_cnt);
4952 4953 datap = (uint32_t *)pm->pm_stat_buf;
4953 4954 *datap++ = tx_cnt;
4954 4955 *datap = chip_cnt;
4955 4956 break;
4956 4957 default:
4957 4958 ret = FC_BADCMD;
4958 4959 break;
4959 4960 }
4960 4961 break;
4961 4962 }
4962 4963
4963 4964 default:
4964 4965
4965 4966 ret = FC_INVALID_REQUEST;
4966 4967 break;
4967 4968 }
4968 4969
4969 4970 break;
4970 4971
4971 4972 }
4972 4973
4973 4974 case FC_PORT_INITIALIZE:
4974 4975 if (!(hba->flag & FC_ONLINE_MODE)) {
4975 4976 return (FC_OFFLINE);
4976 4977 }
4977 4978 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4978 4979 "fca_port_manage: FC_PORT_INITIALIZE");
4979 4980 break;
4980 4981
4981 4982 case FC_PORT_LOOPBACK:
4982 4983 if (!(hba->flag & FC_ONLINE_MODE)) {
4983 4984 return (FC_OFFLINE);
4984 4985 }
4985 4986 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4986 4987 "fca_port_manage: FC_PORT_LOOPBACK");
4987 4988 break;
4988 4989
4989 4990 case FC_PORT_BYPASS:
4990 4991 if (!(hba->flag & FC_ONLINE_MODE)) {
4991 4992 return (FC_OFFLINE);
4992 4993 }
4993 4994 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4994 4995 "fca_port_manage: FC_PORT_BYPASS");
4995 4996 ret = FC_INVALID_REQUEST;
4996 4997 break;
4997 4998
4998 4999 case FC_PORT_UNBYPASS:
4999 5000 if (!(hba->flag & FC_ONLINE_MODE)) {
5000 5001 return (FC_OFFLINE);
5001 5002 }
5002 5003 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5003 5004 "fca_port_manage: FC_PORT_UNBYPASS");
5004 5005 ret = FC_INVALID_REQUEST;
5005 5006 break;
5006 5007
5007 5008 case FC_PORT_GET_NODE_ID:
5008 5009 {
5009 5010 fc_rnid_t *rnid;
5010 5011
5011 5012 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5012 5013 "fca_port_manage: FC_PORT_GET_NODE_ID");
5013 5014
5014 5015 bzero(pm->pm_data_buf, pm->pm_data_len);
5015 5016
5016 5017 if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5017 5018 ret = FC_NOMEM;
5018 5019 break;
5019 5020 }
5020 5021
5021 5022 rnid = (fc_rnid_t *)pm->pm_data_buf;
5022 5023
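/*
 * global_id is the port WWN rendered as 16 hex digits: the nameType
 * nibble, the IEEE extension fields, then the six IEEE address
 * octets.
 */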
5023 5024 (void) sprintf((char *)rnid->global_id,
5024 5025 "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
5025 5026 hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
5026 5027 hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
5027 5028 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
5028 5029 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
5029 5030
5030 5031 rnid->unit_type = RNID_HBA;
5031 5032 rnid->port_id = port->did;
5032 5033 rnid->ip_version = RNID_IPV4;
5033 5034
5034 5035 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5035 5036 "GET_NODE_ID: wwpn: %s", rnid->global_id);
5036 5037 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5037 5038 "GET_NODE_ID: unit_type: 0x%x", rnid->unit_type);
5038 5039 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5039 5040 "GET_NODE_ID: port_id: 0x%x", rnid->port_id);
5040 5041 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5041 5042 "GET_NODE_ID: num_attach: %d", rnid->num_attached);
5042 5043 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5043 5044 "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5044 5045 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5045 5046 "GET_NODE_ID: udp_port: 0x%x", rnid->udp_port);
5046 5047 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5047 5048 "GET_NODE_ID: ip_addr: %s", rnid->ip_addr);
5048 5049 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5049 5050 "GET_NODE_ID: resv: 0x%x", rnid->specific_id_resv);
5050 5051 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5051 5052 "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5052 5053
5053 5054 ret = FC_SUCCESS;
5054 5055 break;
5055 5056 }
5056 5057
5057 5058 case FC_PORT_SET_NODE_ID:
5058 5059 {
5059 5060 fc_rnid_t *rnid;
5060 5061
5061 5062 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5062 5063 "fca_port_manage: FC_PORT_SET_NODE_ID");
5063 5064
5064 5065 if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5065 5066 ret = FC_NOMEM;
5066 5067 break;
5067 5068 }
5068 5069
5069 5070 rnid = (fc_rnid_t *)pm->pm_data_buf;
5070 5071
5071 5072 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5072 5073 "SET_NODE_ID: wwpn: %s", rnid->global_id);
5073 5074 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5074 5075 "SET_NODE_ID: unit_type: 0x%x", rnid->unit_type);
5075 5076 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5076 5077 "SET_NODE_ID: port_id: 0x%x", rnid->port_id);
5077 5078 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5078 5079 "SET_NODE_ID: num_attach: %d", rnid->num_attached);
5079 5080 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5080 5081 "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5081 5082 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5082 5083 "SET_NODE_ID: udp_port: 0x%x", rnid->udp_port);
5083 5084 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5084 5085 "SET_NODE_ID: ip_addr: %s", rnid->ip_addr);
5085 5086 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5086 5087 "SET_NODE_ID: resv: 0x%x", rnid->specific_id_resv);
5087 5088 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5088 5089 "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5089 5090
5090 5091 ret = FC_SUCCESS;
5091 5092 break;
5092 5093 }
5093 5094
5094 5095 #ifdef S11
5095 5096 case FC_PORT_GET_P2P_INFO:
5096 5097 {
5097 5098 fc_fca_p2p_info_t *p2p_info;
5098 5099 NODELIST *ndlp;
5099 5100
5100 5101 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5101 5102 "fca_port_manage: FC_PORT_GET_P2P_INFO");
5102 5103
5103 5104 bzero(pm->pm_data_buf, pm->pm_data_len);
5104 5105
5105 5106 if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
5106 5107 ret = FC_NOMEM;
5107 5108 break;
5108 5109 }
5109 5110
5110 5111 p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf;
5111 5112
5112 5113 if (hba->state >= FC_LINK_UP) {
5113 5114 if ((hba->topology == TOPOLOGY_PT_PT) &&
5114 5115 (hba->flag & FC_PT_TO_PT)) {
5115 5116 p2p_info->fca_d_id = port->did;
5116 5117 p2p_info->d_id = port->rdid;
5117 5118
5118 5119 ndlp = emlxs_node_find_did(port,
5119 5120 port->rdid);
5120 5121
5121 5122 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5122 5123 "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, "
5123 5124 "d_id: 0x%x, ndlp: 0x%p", port->did,
5124 5125 port->rdid, ndlp);
5125 5126 if (ndlp) {
5126 5127 bcopy(&ndlp->nlp_portname,
5127 5128 (caddr_t)&p2p_info->pwwn,
5128 5129 sizeof (la_wwn_t));
5129 5130 bcopy(&ndlp->nlp_nodename,
5130 5131 (caddr_t)&p2p_info->nwwn,
5131 5132 sizeof (la_wwn_t));
5132 5133
5133 5134 ret = FC_SUCCESS;
5134 5135 break;
5135 5136
5136 5137 }
5137 5138 }
5138 5139 }
5139 5140
5140 5141 ret = FC_FAILURE;
5141 5142 break;
5142 5143 }
5143 5144 #endif /* S11 */
5144 5145
5145 5146 default:
5146 5147 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5147 5148 "fca_port_manage: code=%x", pm->pm_cmd_code);
5148 5149 ret = FC_INVALID_REQUEST;
5149 5150 break;
5150 5151
5151 5152 }
5152 5153
5153 5154 return (ret);
5154 5155
5155 5156 } /* emlxs_fca_port_manage() */
5156 5157
5157 5158
5158 5159 /*ARGSUSED*/
5159 5160 static uint32_t
5160 5161 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
5161 5162 uint32_t *arg)
5162 5163 {
5163 5164 uint32_t rval = 0;
5164 5165 emlxs_port_t *port = &PPORT;
5165 5166
5166 5167 switch (test_code) {
5167 5168 #ifdef TEST_SUPPORT
5168 5169 case 1: /* SCSI underrun */
5169 5170 {
5170 5171 hba->underrun_counter = (args)? arg[0]:1;
5171 5172 break;
5172 5173 }
5173 5174 #endif /* TEST_SUPPORT */
5174 5175
5175 5176 default:
5176 5177 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5177 5178 "emlxs_test: Unsupported test code. (0x%x)", test_code);
5178 5179 rval = FC_INVALID_REQUEST;
5179 5180 }
5180 5181
5181 5182 return (rval);
5182 5183
5183 5184 } /* emlxs_test() */
5184 5185
5185 5186
5186 5187 /*
5187 5188 * Given the device number, return the devinfo pointer or the ddiinst number.
5188 5189 * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even
5189 5190 * before attach.
5190 5191 *
5191 5192 * Translate "dev_t" to a pointer to the associated "dev_info_t".
5192 5193 */
5193 5194 /*ARGSUSED*/
5194 5195 static int
5195 5196 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5196 5197 {
5197 5198 emlxs_hba_t *hba;
5198 5199 int32_t ddiinst;
5199 5200
5200 5201 ddiinst = getminor((dev_t)arg);
5201 5202
5202 5203 switch (infocmd) {
5203 5204 case DDI_INFO_DEVT2DEVINFO:
5204 5205 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5205 5206 if (hba)
5206 5207 *result = hba->dip;
5207 5208 else
5208 5209 *result = NULL;
5209 5210 break;
5210 5211
5211 5212 case DDI_INFO_DEVT2INSTANCE:
5212 5213 *result = (void *)((unsigned long)ddiinst);
5213 5214 break;
5214 5215
5215 5216 default:
5216 5217 return (DDI_FAILURE);
5217 5218 }
5218 5219
5219 5220 return (DDI_SUCCESS);
5220 5221
5221 5222 } /* emlxs_info() */
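Note (not part of the original source comments): the getminor(9F) value can be used directly as the soft-state instance here because emlxs_hba_attach() later creates the "devctl" minor node with ddiinst as its minor number (see the ddi_create_minor_node() call in that routine), so minor number and DDI instance are the same by construction. The open, close, and ioctl entry points below rely on the same convention.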
5222 5223
5223 5224
5224 5225 static int32_t
5225 5226 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
5226 5227 {
5227 5228 emlxs_hba_t *hba;
5228 5229 emlxs_port_t *port;
5229 5230 int32_t ddiinst;
5230 5231 int rval = DDI_SUCCESS;
5231 5232
5232 5233 ddiinst = ddi_get_instance(dip);
5233 5234 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5234 5235 port = &PPORT;
5235 5236
5236 5237 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5237 5238 "fca_power: comp=%x level=%x", comp, level);
5238 5239
5239 5240 if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
5240 5241 return (DDI_FAILURE);
5241 5242 }
5242 5243
5243 5244 mutex_enter(&EMLXS_PM_LOCK);
5244 5245
5245 5246 /* If we are already at the proper level then return success */
5246 5247 if (hba->pm_level == level) {
5247 5248 mutex_exit(&EMLXS_PM_LOCK);
5248 5249 return (DDI_SUCCESS);
5249 5250 }
5250 5251
5251 5252 switch (level) {
5252 5253 case EMLXS_PM_ADAPTER_UP:
5253 5254
5254 5255 /*
5255 5256 * If we are already in emlxs_attach,
5256 5257 * let emlxs_hba_attach take care of things
5257 5258 */
5258 5259 if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
5259 5260 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5260 5261 break;
5261 5262 }
5262 5263
5263 5264 /* Check if adapter is suspended */
5264 5265 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5265 5266 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5266 5267
5267 5268 /* Try to resume the port */
5268 5269 rval = emlxs_hba_resume(dip);
5269 5270
5270 5271 if (rval != DDI_SUCCESS) {
5271 5272 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5272 5273 }
5273 5274 break;
5274 5275 }
5275 5276
5276 5277 /* Set adapter up */
5277 5278 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5278 5279 break;
5279 5280
5280 5281 case EMLXS_PM_ADAPTER_DOWN:
5281 5282
5282 5283
5283 5284 /*
5284 5285 * If we are already in emlxs_detach,
5285 5286 * let emlxs_hba_detach take care of things
5286 5287 */
5287 5288 if (hba->pm_state & EMLXS_PM_IN_DETACH) {
5288 5289 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5289 5290 break;
5290 5291 }
5291 5292
5292 5293 /* Check if adapter is not suspended */
5293 5294 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5294 5295 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5295 5296
5296 5297 /* Try to suspend the port */
5297 5298 rval = emlxs_hba_suspend(dip);
5298 5299
5299 5300 if (rval != DDI_SUCCESS) {
5300 5301 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5301 5302 }
5302 5303
5303 5304 break;
5304 5305 }
5305 5306
5306 5307 /* Set adapter down */
5307 5308 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5308 5309 break;
5309 5310
5310 5311 default:
5311 5312 rval = DDI_FAILURE;
5312 5313 break;
5313 5314
5314 5315 }
5315 5316
5316 5317 mutex_exit(&EMLXS_PM_LOCK);
5317 5318
5318 5319 return (rval);
5319 5320
5320 5321 } /* emlxs_power() */
5321 5322
5322 5323
5323 5324 #ifdef EMLXS_I386
5324 5325 #ifdef S11
5325 5326 /*
5326 5327 * quiesce(9E) entry point.
5327 5328 *
5328 5329  * This function is called when the system is single-threaded at high PIL
5329 5330  * with preemption disabled. Therefore, this function must not block.
5330 5331 *
5331 5332 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5332 5333 * DDI_FAILURE indicates an error condition and should almost never happen.
5333 5334 */
5334 5335 static int
5335 5336 emlxs_quiesce(dev_info_t *dip)
5336 5337 {
5337 5338 emlxs_hba_t *hba;
5338 5339 emlxs_port_t *port;
5339 5340 int32_t ddiinst;
5340 5341 int rval = DDI_SUCCESS;
5341 5342
5342 5343 ddiinst = ddi_get_instance(dip);
5343 5344 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5344 5345 port = &PPORT;
5345 5346
5346 5347 if (hba == NULL || port == NULL) {
5347 5348 return (DDI_FAILURE);
5348 5349 }
5349 5350
5350 5351 /* The fourth arg 1 indicates the call is from quiesce */
5351 5352 if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) {
5352 5353 return (rval);
5353 5354 } else {
5354 5355 return (DDI_FAILURE);
5355 5356 }
5356 5357
5357 5358 } /* emlxs_quiesce */
5358 5359 #endif
5359 5360 #endif /* EMLXS_I386 */
5360 5361
5361 5362
5362 5363 static int
5363 5364 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
5364 5365 {
5365 5366 emlxs_hba_t *hba;
5366 5367 emlxs_port_t *port;
5367 5368 int ddiinst;
5368 5369
5369 5370 ddiinst = getminor(*dev_p);
5370 5371 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5371 5372
5372 5373 if (hba == NULL) {
5373 5374 return (ENXIO);
5374 5375 }
5375 5376
5376 5377 port = &PPORT;
5377 5378
5378 5379 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5379 5380 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5380 5381 "open failed: Driver suspended.");
5381 5382 return (ENXIO);
5382 5383 }
5383 5384
5384 5385 if (otype != OTYP_CHR) {
5385 5386 return (EINVAL);
5386 5387 }
5387 5388
5388 5389 if (drv_priv(cred_p)) {
5389 5390 return (EPERM);
5390 5391 }
5391 5392
5392 5393 mutex_enter(&EMLXS_IOCTL_LOCK);
5393 5394
5394 5395 if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
5395 5396 mutex_exit(&EMLXS_IOCTL_LOCK);
5396 5397 return (EBUSY);
5397 5398 }
5398 5399
5399 5400 if (flag & FEXCL) {
5400 5401 if (hba->ioctl_flags & EMLXS_OPEN) {
5401 5402 mutex_exit(&EMLXS_IOCTL_LOCK);
5402 5403 return (EBUSY);
5403 5404 }
5404 5405
5405 5406 hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
5406 5407 }
5407 5408
5408 5409 hba->ioctl_flags |= EMLXS_OPEN;
5409 5410
5410 5411 mutex_exit(&EMLXS_IOCTL_LOCK);
5411 5412
5412 5413 return (0);
5413 5414
5414 5415 } /* emlxs_open() */
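Note (added for clarity, not from the original source): the two EBUSY paths above give the devctl node conventional exclusive-open behavior. While an exclusive open is held, every new open fails; and an open requesting FEXCL (the kernel-level counterpart of an open(2) with O_EXCL on the character node) fails if the node is already open in any mode.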
5415 5416
5416 5417
5417 5418 /*ARGSUSED*/
5418 5419 static int
5419 5420 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
5420 5421 {
5421 5422 emlxs_hba_t *hba;
5422 5423 int ddiinst;
5423 5424
5424 5425 ddiinst = getminor(dev);
5425 5426 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5426 5427
5427 5428 if (hba == NULL) {
5428 5429 return (ENXIO);
5429 5430 }
5430 5431
5431 5432 if (otype != OTYP_CHR) {
5432 5433 return (EINVAL);
5433 5434 }
5434 5435
5435 5436 mutex_enter(&EMLXS_IOCTL_LOCK);
5436 5437
5437 5438 if (!(hba->ioctl_flags & EMLXS_OPEN)) {
5438 5439 mutex_exit(&EMLXS_IOCTL_LOCK);
5439 5440 return (ENODEV);
5440 5441 }
5441 5442
5442 5443 hba->ioctl_flags &= ~EMLXS_OPEN;
5443 5444 hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
5444 5445
5445 5446 mutex_exit(&EMLXS_IOCTL_LOCK);
5446 5447
5447 5448 return (0);
5448 5449
5449 5450 } /* emlxs_close() */
5450 5451
5451 5452
5452 5453 /*ARGSUSED*/
5453 5454 static int
5454 5455 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
5455 5456 cred_t *cred_p, int32_t *rval_p)
5456 5457 {
5457 5458 emlxs_hba_t *hba;
5458 5459 emlxs_port_t *port;
5459 5460 int rval = 0; /* return code */
5460 5461 int ddiinst;
5461 5462
5462 5463 ddiinst = getminor(dev);
5463 5464 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5464 5465
5465 5466 if (hba == NULL) {
5466 5467 return (ENXIO);
5467 5468 }
5468 5469
5469 5470 port = &PPORT;
5470 5471
5471 5472 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5472 5473 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5473 5474 "ioctl failed: Driver suspended.");
5474 5475
5475 5476 return (ENXIO);
5476 5477 }
5477 5478
5478 5479 mutex_enter(&EMLXS_IOCTL_LOCK);
5479 5480 if (!(hba->ioctl_flags & EMLXS_OPEN)) {
5480 5481 mutex_exit(&EMLXS_IOCTL_LOCK);
5481 5482 return (ENXIO);
5482 5483 }
5483 5484 mutex_exit(&EMLXS_IOCTL_LOCK);
5484 5485
5485 5486 #ifdef IDLE_TIMER
5486 5487 emlxs_pm_busy_component(hba);
5487 5488 #endif /* IDLE_TIMER */
5488 5489
5489 5490 switch (cmd) {
5490 5491 case EMLXS_DFC_COMMAND:
5491 5492 rval = emlxs_dfc_manage(hba, (void *)arg, mode);
5492 5493 break;
5493 5494
5494 5495 default:
5495 5496 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5496 5497 "ioctl: Invalid command received. cmd=%x", cmd);
5497 5498 rval = EINVAL;
5498 5499 }
5499 5500
5500 5501 done:
5501 5502 return (rval);
5502 5503
5503 5504 } /* emlxs_ioctl() */
5504 5505
5505 5506
5506 5507
5507 5508 /*
5508 5509 *
5509 5510 * Device Driver Common Routines
5510 5511 *
5511 5512 */
5512 5513
5513 5514 /* EMLXS_PM_LOCK must be held for this call */
5514 5515 static int
5515 5516 emlxs_hba_resume(dev_info_t *dip)
5516 5517 {
5517 5518 emlxs_hba_t *hba;
5518 5519 emlxs_port_t *port;
5519 5520 int ddiinst;
5520 5521
5521 5522 ddiinst = ddi_get_instance(dip);
5522 5523 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5523 5524 port = &PPORT;
5524 5525
5525 5526 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
5526 5527
5527 5528 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5528 5529 return (DDI_SUCCESS);
5529 5530 }
5530 5531
5531 5532 hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5532 5533
5533 5534 /* Take the adapter online */
5534 5535 if (emlxs_power_up(hba)) {
5535 5536 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
5536 5537 "Unable to take adapter online.");
5537 5538
5538 5539 hba->pm_state |= EMLXS_PM_SUSPENDED;
5539 5540
5540 5541 return (DDI_FAILURE);
5541 5542 }
5542 5543
5543 5544 return (DDI_SUCCESS);
5544 5545
5545 5546 } /* emlxs_hba_resume() */
5546 5547
5547 5548
5548 5549 /* EMLXS_PM_LOCK must be held for this call */
5549 5550 static int
5550 5551 emlxs_hba_suspend(dev_info_t *dip)
5551 5552 {
5552 5553 emlxs_hba_t *hba;
5553 5554 emlxs_port_t *port;
5554 5555 int ddiinst;
5555 5556
5556 5557 ddiinst = ddi_get_instance(dip);
5557 5558 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5558 5559 port = &PPORT;
5559 5560
5560 5561 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
5561 5562
5562 5563 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5563 5564 return (DDI_SUCCESS);
5564 5565 }
5565 5566
5566 5567 hba->pm_state |= EMLXS_PM_SUSPENDED;
5567 5568
5568 5569 /* Take the adapter offline */
5569 5570 if (emlxs_power_down(hba)) {
5570 5571 hba->pm_state &= ~EMLXS_PM_SUSPENDED;
5571 5572
5572 5573 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
5573 5574 "Unable to take adapter offline.");
5574 5575
5575 5576 return (DDI_FAILURE);
5576 5577 }
5577 5578
5578 5579 return (DDI_SUCCESS);
5579 5580
5580 5581 } /* emlxs_hba_suspend() */
5581 5582
5582 5583
5583 5584
5584 5585 static void
5585 5586 emlxs_lock_init(emlxs_hba_t *hba)
5586 5587 {
5587 5588 emlxs_port_t *port = &PPORT;
5588 5589 int32_t ddiinst;
5589 5590 char buf[64];
5590 5591 uint32_t i;
5591 5592
5592 5593 ddiinst = hba->ddiinst;
5593 5594
5594 5595 /* Initialize the power management lock */
5595 5596 (void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst);
5596 5597 mutex_init(&EMLXS_PM_LOCK, buf, MUTEX_DRIVER,
5597 5598 DDI_INTR_PRI(hba->intr_arg));
5598 5599
5599 5600 (void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst);
5600 5601 mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER,
5601 5602 DDI_INTR_PRI(hba->intr_arg));
5602 5603
5603 5604 (void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst);
5604 5605 cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL);
5605 5606
5606 5607 (void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst);
5607 5608 mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER,
5608 5609 DDI_INTR_PRI(hba->intr_arg));
5609 5610
5610 5611 (void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst);
5611 5612 mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER,
5612 5613 DDI_INTR_PRI(hba->intr_arg));
5613 5614
5614 5615 (void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst);
5615 5616 cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL);
5616 5617
5617 5618 (void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst);
5618 5619 mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER,
5619 5620 DDI_INTR_PRI(hba->intr_arg));
5620 5621
5621 5622 (void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst);
5622 5623 cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL);
5623 5624
5624 5625 (void) sprintf(buf, "%s%d_tx channel_lock mutex", DRIVER_NAME, ddiinst);
5625 5626 mutex_init(&EMLXS_TX_CHANNEL_LOCK, buf, MUTEX_DRIVER,
5626 5627 DDI_INTR_PRI(hba->intr_arg));
5627 5628
5628 5629 for (i = 0; i < MAX_RINGS; i++) {
5629 5630 (void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex", DRIVER_NAME,
5630 5631 ddiinst, i);
5631 5632 mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER,
5632 5633 DDI_INTR_PRI(hba->intr_arg));
5633 5634 }
5634 5635
5635 5636
5636 5637 for (i = 0; i < EMLXS_MAX_WQS; i++) {
5637 5638 (void) sprintf(buf, "%s%d wq_cq_eq%d lock mutex", DRIVER_NAME,
5638 5639 ddiinst, i);
5639 5640 mutex_init(&EMLXS_QUE_LOCK(i), buf, MUTEX_DRIVER,
5640 5641 DDI_INTR_PRI(hba->intr_arg));
5641 5642 }
5642 5643
5643 5644 (void) sprintf(buf, "%s%d_msiid lock mutex", DRIVER_NAME, ddiinst);
5644 5645 mutex_init(&EMLXS_MSIID_LOCK, buf, MUTEX_DRIVER,
5645 5646 DDI_INTR_PRI(hba->intr_arg));
5646 5647
5647 5648 (void) sprintf(buf, "%s%d_fctab_lock mutex", DRIVER_NAME, ddiinst);
5648 5649 mutex_init(&EMLXS_FCTAB_LOCK, buf, MUTEX_DRIVER,
5649 5650 DDI_INTR_PRI(hba->intr_arg));
5650 5651
5651 5652 (void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst);
5652 5653 mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER,
5653 5654 DDI_INTR_PRI(hba->intr_arg));
5654 5655
5655 5656 (void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst);
5656 5657 mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER,
5657 5658 DDI_INTR_PRI(hba->intr_arg));
5658 5659
5659 5660 (void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst);
5660 5661 mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER,
5661 5662 DDI_INTR_PRI(hba->intr_arg));
5662 5663
5663 5664 #ifdef DUMP_SUPPORT
5664 5665 (void) sprintf(buf, "%s%d_dump mutex", DRIVER_NAME, ddiinst);
5665 5666 mutex_init(&EMLXS_DUMP_LOCK, buf, MUTEX_DRIVER,
5666 5667 DDI_INTR_PRI(hba->intr_arg));
5667 5668 #endif /* DUMP_SUPPORT */
5668 5669
5669 5670 (void) sprintf(buf, "%s%d_thread_lock mutex", DRIVER_NAME, ddiinst);
5670 5671 mutex_init(&EMLXS_SPAWN_LOCK, buf, MUTEX_DRIVER,
5671 5672 DDI_INTR_PRI(hba->intr_arg));
5672 5673
5673 5674 /* Create per port locks */
5674 5675 for (i = 0; i < MAX_VPORTS; i++) {
5675 5676 port = &VPORT(i);
5676 5677
5677 5678 rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
5678 5679
5679 5680 if (i == 0) {
5680 5681 (void) sprintf(buf, "%s%d_pkt_lock mutex", DRIVER_NAME,
5681 5682 ddiinst);
5682 5683 mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5683 5684 DDI_INTR_PRI(hba->intr_arg));
5684 5685
5685 5686 (void) sprintf(buf, "%s%d_pkt_lock cv", DRIVER_NAME,
5686 5687 ddiinst);
5687 5688 cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5688 5689
5689 5690 (void) sprintf(buf, "%s%d_ub_lock mutex", DRIVER_NAME,
5690 5691 ddiinst);
5691 5692 mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5692 5693 DDI_INTR_PRI(hba->intr_arg));
5693 5694 } else {
5694 5695 (void) sprintf(buf, "%s%d.%d_pkt_lock mutex",
5695 5696 DRIVER_NAME, ddiinst, port->vpi);
5696 5697 mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER,
5697 5698 DDI_INTR_PRI(hba->intr_arg));
5698 5699
5699 5700 (void) sprintf(buf, "%s%d.%d_pkt_lock cv", DRIVER_NAME,
5700 5701 ddiinst, port->vpi);
5701 5702 cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL);
5702 5703
5703 5704 (void) sprintf(buf, "%s%d.%d_ub_lock mutex",
5704 5705 DRIVER_NAME, ddiinst, port->vpi);
5705 5706 mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER,
5706 5707 DDI_INTR_PRI(hba->intr_arg));
5707 5708 }
5708 5709 }
5709 5710
5710 5711 return;
5711 5712
5712 5713 } /* emlxs_lock_init() */
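For concreteness (assuming DRIVER_NAME expands to "emlxs", which this section does not spell out): instance 0's physical-port packet lock above would be named "emlxs0_pkt_lock mutex", while the same lock on virtual port 1 of that instance would be "emlxs0.1_pkt_lock mutex" from the "%s%d.%d_..." format. The name argument to mutex_init(9F) serves only as a descriptive label; the locks themselves are distinct objects held in each port structure.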
5713 5714
5714 5715
5715 5716
5716 5717 static void
5717 5718 emlxs_lock_destroy(emlxs_hba_t *hba)
5718 5719 {
5719 5720 emlxs_port_t *port = &PPORT;
5720 5721 uint32_t i;
5721 5722
5722 5723 mutex_destroy(&EMLXS_TIMER_LOCK);
5723 5724 cv_destroy(&hba->timer_lock_cv);
5724 5725
5725 5726 mutex_destroy(&EMLXS_PORT_LOCK);
5726 5727
5727 5728 cv_destroy(&EMLXS_MBOX_CV);
5728 5729 cv_destroy(&EMLXS_LINKUP_CV);
5729 5730
5730 5731 mutex_destroy(&EMLXS_LINKUP_LOCK);
5731 5732 mutex_destroy(&EMLXS_MBOX_LOCK);
5732 5733
5733 5734 mutex_destroy(&EMLXS_TX_CHANNEL_LOCK);
5734 5735
5735 5736 for (i = 0; i < MAX_RINGS; i++) {
5736 5737 mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
5737 5738 }
5738 5739
5739 5740 for (i = 0; i < EMLXS_MAX_WQS; i++) {
5740 5741 mutex_destroy(&EMLXS_QUE_LOCK(i));
5741 5742 }
5742 5743
5743 5744 mutex_destroy(&EMLXS_MSIID_LOCK);
5744 5745
5745 5746 mutex_destroy(&EMLXS_FCTAB_LOCK);
5746 5747 mutex_destroy(&EMLXS_MEMGET_LOCK);
5747 5748 mutex_destroy(&EMLXS_MEMPUT_LOCK);
5748 5749 mutex_destroy(&EMLXS_IOCTL_LOCK);
5749 5750 mutex_destroy(&EMLXS_SPAWN_LOCK);
5750 5751 mutex_destroy(&EMLXS_PM_LOCK);
5751 5752
5752 5753 #ifdef DUMP_SUPPORT
5753 5754 mutex_destroy(&EMLXS_DUMP_LOCK);
5754 5755 #endif /* DUMP_SUPPORT */
5755 5756
5756 5757 /* Destroy per port locks */
5757 5758 for (i = 0; i < MAX_VPORTS; i++) {
5758 5759 port = &VPORT(i);
5759 5760 rw_destroy(&port->node_rwlock);
5760 5761 mutex_destroy(&EMLXS_PKT_LOCK);
5761 5762 cv_destroy(&EMLXS_PKT_CV);
5762 5763 mutex_destroy(&EMLXS_UB_LOCK);
5763 5764 }
5764 5765
5765 5766 return;
5766 5767
5767 5768 } /* emlxs_lock_destroy() */
5768 5769
5769 5770
5770 5771 /* init_flag values */
5771 5772 #define ATTACH_SOFT_STATE 0x00000001
5772 5773 #define ATTACH_FCA_TRAN 0x00000002
5773 5774 #define ATTACH_HBA 0x00000004
5774 5775 #define ATTACH_LOG 0x00000008
5775 5776 #define ATTACH_MAP_BUS 0x00000010
5776 5777 #define ATTACH_INTR_INIT 0x00000020
5777 5778 #define ATTACH_PROP 0x00000040
5778 5779 #define ATTACH_LOCK 0x00000080
5779 5780 #define ATTACH_THREAD 0x00000100
5780 5781 #define ATTACH_INTR_ADD 0x00000200
5781 5782 #define ATTACH_ONLINE 0x00000400
5782 5783 #define ATTACH_NODE 0x00000800
5783 5784 #define ATTACH_FCT 0x00001000
5784 5785 #define ATTACH_FCA 0x00002000
5785 5786 #define ATTACH_KSTAT 0x00004000
5786 5787 #define ATTACH_DHCHAP 0x00008000
5787 5788 #define ATTACH_FM 0x00010000
5788 5789 #define ATTACH_MAP_SLI 0x00020000
5789 5790 #define ATTACH_SPAWN 0x00040000
5790 5791 #define ATTACH_EVENTS 0x00080000
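The init_flag bits above are accumulated one at a time by emlxs_hba_attach() as each attach step completes, and emlxs_driver_remove() below tears down only the steps whose bits are set, so a failure part-way through attach unwinds exactly what was done. A minimal sketch of the same accumulate-and-unwind pattern, using illustrative flag and helper names rather than the driver's own:

	#define STEP_A	0x0001		/* illustrative flags, not the driver's */
	#define STEP_B	0x0002

	static int
	attach_sketch(int fail_b)
	{
		unsigned int init_flag = 0;

		init_flag |= STEP_A;		/* step A completed */

		if (fail_b)
			goto failed;		/* step B failed; STEP_B never set */
		init_flag |= STEP_B;

		return (0);

	failed:
		/* Undo only the recorded steps, newest first */
		if (init_flag & STEP_B) {
			/* undo step B here */
		}
		if (init_flag & STEP_A) {
			/* undo step A here */
		}
		return (-1);
	}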
5791 5792
5792 5793 static void
5793 5794 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
5794 5795 {
5795 5796 emlxs_hba_t *hba = NULL;
5796 5797 int ddiinst;
5797 5798
5798 5799 ddiinst = ddi_get_instance(dip);
5799 5800
5800 5801 if (init_flag & ATTACH_HBA) {
5801 5802 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5802 5803
5803 5804 if (init_flag & ATTACH_SPAWN) {
5804 5805 emlxs_thread_spawn_destroy(hba);
5805 5806 }
5806 5807
5807 5808 if (init_flag & ATTACH_EVENTS) {
5808 5809 (void) emlxs_event_queue_destroy(hba);
5809 5810 }
5810 5811
5811 5812 if (init_flag & ATTACH_ONLINE) {
5812 5813 (void) emlxs_offline(hba);
5813 5814 }
5814 5815
5815 5816 if (init_flag & ATTACH_INTR_ADD) {
5816 5817 (void) EMLXS_INTR_REMOVE(hba);
5817 5818 }
5818 5819 #ifdef SFCT_SUPPORT
5819 5820 if (init_flag & ATTACH_FCT) {
5820 5821 emlxs_fct_detach(hba);
5821 5822 if (hba->tgt_mode) {
5822 5823 emlxs_fct_modclose();
5823 5824 }
5824 5825 }
5825 5826 #endif /* SFCT_SUPPORT */
5826 5827
5827 5828 #ifdef DHCHAP_SUPPORT
5828 5829 if (init_flag & ATTACH_DHCHAP) {
5829 5830 emlxs_dhc_detach(hba);
5830 5831 }
5831 5832 #endif /* DHCHAP_SUPPORT */
5832 5833
5833 5834 if (init_flag & ATTACH_KSTAT) {
5834 5835 kstat_delete(hba->kstat);
5835 5836 }
5836 5837
5837 5838 if (init_flag & ATTACH_FCA) {
5838 5839 emlxs_fca_detach(hba);
5839 5840 }
5840 5841
5841 5842 if (init_flag & ATTACH_NODE) {
5842 5843 (void) ddi_remove_minor_node(hba->dip, "devctl");
5843 5844 }
5844 5845
5845 5846 if (init_flag & ATTACH_THREAD) {
5846 5847 emlxs_thread_destroy(&hba->iodone_thread);
5847 5848 }
5848 5849
5849 5850 if (init_flag & ATTACH_PROP) {
5850 5851 (void) ddi_prop_remove_all(hba->dip);
5851 5852 }
5852 5853
5853 5854 if (init_flag & ATTACH_LOCK) {
5854 5855 emlxs_lock_destroy(hba);
5855 5856 }
5856 5857
5857 5858 if (init_flag & ATTACH_INTR_INIT) {
5858 5859 (void) EMLXS_INTR_UNINIT(hba);
5859 5860 }
5860 5861
5861 5862 if (init_flag & ATTACH_MAP_BUS) {
5862 5863 emlxs_unmap_bus(hba);
5863 5864 }
5864 5865
5865 5866 if (init_flag & ATTACH_MAP_SLI) {
5866 5867 EMLXS_SLI_UNMAP_HDW(hba);
5867 5868 }
5868 5869
5869 5870 #ifdef FMA_SUPPORT
5870 5871 if (init_flag & ATTACH_FM) {
5871 5872 emlxs_fm_fini(hba);
5872 5873 }
5873 5874 #endif /* FMA_SUPPORT */
5874 5875
5875 5876 if (init_flag & ATTACH_LOG) {
5876 5877 emlxs_msg_log_destroy(hba);
5877 5878 }
5878 5879
5879 5880 if (init_flag & ATTACH_FCA_TRAN) {
5880 5881 (void) ddi_set_driver_private(hba->dip, NULL);
5881 5882 kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
5882 5883 hba->fca_tran = NULL;
5883 5884 }
5884 5885
5885 5886 if (init_flag & ATTACH_HBA) {
5886 5887 emlxs_device.log[hba->emlxinst] = 0;
5887 5888 emlxs_device.hba[hba->emlxinst] =
5888 5889 (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0));
5889 5890 #ifdef DUMP_SUPPORT
5890 5891 emlxs_device.dump_txtfile[hba->emlxinst] = 0;
5891 5892 emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
5892 5893 emlxs_device.dump_ceefile[hba->emlxinst] = 0;
5893 5894 #endif /* DUMP_SUPPORT */
5894 5895
5895 5896 }
5896 5897 }
5897 5898
5898 5899 if (init_flag & ATTACH_SOFT_STATE) {
5899 5900 (void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
5900 5901 }
5901 5902
5902 5903 return;
5903 5904
5904 5905 } /* emlxs_driver_remove() */
5905 5906
5906 5907
5907 5908
5908 5909 /* This determines which ports will run in initiator mode */
5909 5910 static void
5910 5911 emlxs_fca_init(emlxs_hba_t *hba)
5911 5912 {
5912 5913 emlxs_port_t *port = &PPORT;
5913 5914 emlxs_port_t *vport;
5914 5915 uint32_t i;
5915 5916
5916 5917 if (!hba->ini_mode) {
5917 5918 return;
5918 5919 }
5919 5920 /* Check if SFS present */
5920 5921 if (((void *)MODSYM(fc_fca_init) == NULL) ||
5921 5922 ((void *)MODSYM(fc_fca_attach) == NULL)) {
5922 5923 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5923 5924 "SFS not present. Initiator mode disabled.");
5924 5925 goto failed;
5925 5926 }
5926 5927
5927 5928 /* Check if our SFS driver interface matches the current SFS stack */
5928 5929 if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
5929 5930 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5930 5931 "SFS/FCA version mismatch. FCA=0x%x",
5931 5932 hba->fca_tran->fca_version);
5932 5933 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5933 5934 "SFS present. Initiator mode disabled.");
5934 5935
5935 5936 goto failed;
5936 5937 }
5937 5938
5938 5939 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
5939 5940 "SFS present. Initiator mode enabled.");
5940 5941
5941 5942 return;
5942 5943
5943 5944 failed:
5944 5945
5945 5946 hba->ini_mode = 0;
5946 5947 for (i = 0; i < MAX_VPORTS; i++) {
5947 5948 vport = &VPORT(i);
5948 5949 vport->ini_mode = 0;
5949 5950 }
5950 5951
5951 5952 return;
5952 5953
5953 5954 } /* emlxs_fca_init() */
5954 5955
5955 5956
5956 5957 /* This determines which ports will run in initiator or target mode */
5957 5958 static void
5958 5959 emlxs_set_mode(emlxs_hba_t *hba)
5959 5960 {
5960 5961 emlxs_port_t *port = &PPORT;
5961 5962 emlxs_port_t *vport;
5962 5963 uint32_t i;
5963 5964 uint32_t tgt_mode = 0;
5964 5965
5965 5966 #ifdef SFCT_SUPPORT
5966 5967 emlxs_config_t *cfg;
5967 5968
5968 5969 cfg = &hba->config[CFG_TARGET_MODE];
5969 5970 tgt_mode = cfg->current;
5970 5971
5971 5972 if (tgt_mode) {
5972 5973 if (emlxs_fct_modopen() != 0) {
5973 5974 tgt_mode = 0;
5974 5975 }
5975 5976 }
5976 5977
5977 5978 port->fct_flags = 0;
5978 5979 #endif /* SFCT_SUPPORT */
5979 5980
5980 5981 /* Initialize physical port */
5981 5982 if (tgt_mode) {
5982 5983 hba->tgt_mode = 1;
5983 5984 hba->ini_mode = 0;
5984 5985
5985 5986 port->tgt_mode = 1;
5986 5987 port->ini_mode = 0;
5987 5988 } else {
5988 5989 hba->tgt_mode = 0;
5989 5990 hba->ini_mode = 1;
5990 5991
5991 5992 port->tgt_mode = 0;
5992 5993 port->ini_mode = 1;
5993 5994 }
5994 5995
5995 5996 /* Initialize virtual ports */
5996 5997 /* Virtual ports take on the mode of the parent physical port */
5997 5998 for (i = 1; i < MAX_VPORTS; i++) {
5998 5999 vport = &VPORT(i);
5999 6000
6000 6001 #ifdef SFCT_SUPPORT
6001 6002 vport->fct_flags = 0;
6002 6003 #endif /* SFCT_SUPPORT */
6003 6004
6004 6005 vport->ini_mode = port->ini_mode;
6005 6006 vport->tgt_mode = port->tgt_mode;
6006 6007 }
6007 6008
6008 6009 /* Check if initiator mode is requested */
6009 6010 if (hba->ini_mode) {
6010 6011 emlxs_fca_init(hba);
6011 6012 } else {
6012 6013 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6013 6014 "Initiator mode not enabled.");
6014 6015 }
6015 6016
6016 6017 #ifdef SFCT_SUPPORT
6017 6018 /* Check if target mode is requested */
6018 6019 if (hba->tgt_mode) {
6019 6020 emlxs_fct_init(hba);
6020 6021 } else {
6021 6022 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6022 6023 "Target mode not enabled.");
6023 6024 }
6024 6025 #endif /* SFCT_SUPPORT */
6025 6026
6026 6027 return;
6027 6028
6028 6029 } /* emlxs_set_mode() */
6029 6030
6030 6031
6031 6032
6032 6033 static void
6033 6034 emlxs_fca_attach(emlxs_hba_t *hba)
6034 6035 {
6035 6036 /* Update our transport structure */
6036 6037 hba->fca_tran->fca_iblock = (ddi_iblock_cookie_t *)&hba->intr_arg;
6037 6038 hba->fca_tran->fca_cmd_max = hba->io_throttle;
6038 6039
6039 6040 #if (EMLXS_MODREV >= EMLXS_MODREV5)
6040 6041 bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
6041 6042 sizeof (NAME_TYPE));
6042 6043 #endif /* >= EMLXS_MODREV5 */
6043 6044
6044 6045 return;
6045 6046
6046 6047 } /* emlxs_fca_attach() */
6047 6048
6048 6049
6049 6050 static void
6050 6051 emlxs_fca_detach(emlxs_hba_t *hba)
6051 6052 {
6052 6053 uint32_t i;
6053 6054 emlxs_port_t *vport;
6054 6055
6055 6056 if (hba->ini_mode) {
6056 6057 if ((void *)MODSYM(fc_fca_detach) != NULL) {
6057 6058 MODSYM(fc_fca_detach)(hba->dip);
6058 6059 }
6059 6060
6060 6061 hba->ini_mode = 0;
6061 6062
6062 6063 for (i = 0; i < MAX_VPORTS; i++) {
6063 6064 vport = &VPORT(i);
6064 6065 vport->ini_mode = 0;
6065 6066 }
6066 6067 }
6067 6068
6068 6069 return;
6069 6070
6070 6071 } /* emlxs_fca_detach() */
6071 6072
6072 6073
6073 6074
6074 6075 static void
6075 6076 emlxs_drv_banner(emlxs_hba_t *hba)
6076 6077 {
6077 6078 emlxs_port_t *port = &PPORT;
6078 6079 uint32_t i;
6079 6080 char sli_mode[16];
6080 6081 char msi_mode[16];
6081 6082 char npiv_mode[16];
6082 6083 emlxs_vpd_t *vpd = &VPD;
6083 6084 emlxs_config_t *cfg = &CFG;
6084 6085 uint8_t *wwpn;
6085 6086 uint8_t *wwnn;
6086 6087 uint32_t fw_show = 0;
6087 6088
6088 6089 /* Display firmware library one time for all driver instances */
6089 6090 mutex_enter(&emlxs_device.lock);
6090 6091 if (! (emlxs_instance_flag & EMLXS_FW_SHOW)) {
6091 6092 emlxs_instance_flag |= EMLXS_FW_SHOW;
6092 6093 fw_show = 1;
6093 6094 }
6094 6095 mutex_exit(&emlxs_device.lock);
6095 6096
6096 6097 if (fw_show) {
6097 6098 emlxs_fw_show(hba);
6098 6099 }
6099 6100
6100 6101 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
6101 6102 emlxs_revision);
6102 6103
6103 6104 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6104 6105 "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
6105 6106 hba->model_info.device_id, hba->model_info.ssdid,
6106 6107 hba->model_info.id);
6107 6108
6108 6109 #ifdef EMLXS_I386
6109 6110
6110 6111 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6111 6112 "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
6112 6113 vpd->boot_version);
6113 6114
6114 6115 #else /* EMLXS_SPARC */
6115 6116
6116 6117 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6117 6118 "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
6118 6119 vpd->fw_label, vpd->boot_version, vpd->fcode_version);
6119 6120
6120 6121 #endif /* EMLXS_I386 */
6121 6122
6122 6123 if (hba->sli_mode > 3) {
6123 6124 (void) sprintf(sli_mode, "SLI:%d(%s)", hba->sli_mode,
6124 6125 ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP"));
6125 6126 } else {
6126 6127 (void) sprintf(sli_mode, "SLI:%d", hba->sli_mode);
6127 6128 }
6128 6129
6129 6130 (void) strcpy(msi_mode, " INTX:1");
6130 6131
6131 6132 #ifdef MSI_SUPPORT
6132 6133 if (hba->intr_flags & EMLXS_MSI_ENABLED) {
6133 6134 switch (hba->intr_type) {
6134 6135 case DDI_INTR_TYPE_FIXED:
6135 6136 (void) strcpy(msi_mode, " MSI:0");
6136 6137 break;
6137 6138
6138 6139 case DDI_INTR_TYPE_MSI:
6139 6140 (void) sprintf(msi_mode, " MSI:%d", hba->intr_count);
6140 6141 break;
6141 6142
6142 6143 case DDI_INTR_TYPE_MSIX:
6143 6144 (void) sprintf(msi_mode, " MSIX:%d", hba->intr_count);
6144 6145 break;
6145 6146 }
6146 6147 }
6147 6148 #endif
6148 6149
6149 6150 (void) strcpy(npiv_mode, "");
6150 6151
6151 6152 if (hba->flag & FC_NPIV_ENABLED) {
6152 6153 (void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max+1);
6153 6154 } else {
6154 6155 (void) strcpy(npiv_mode, " NPIV:0");
6155 6156 }
6156 6157
6157 6158 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s",
6158 6159 sli_mode, msi_mode, npiv_mode,
6159 6160 ((hba->ini_mode)? " FCA":""), ((hba->tgt_mode)? " FCT":""));
6160 6161
6161 6162 wwpn = (uint8_t *)&hba->wwpn;
6162 6163 wwnn = (uint8_t *)&hba->wwnn;
6163 6164 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6164 6165 "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6165 6166 "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6166 6167 wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
6167 6168 wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
6168 6169 wwnn[6], wwnn[7]);
6169 6170
6170 6171 for (i = 0; i < MAX_VPORTS; i++) {
6171 6172 port = &VPORT(i);
6172 6173
6173 6174 if (!(port->flag & EMLXS_PORT_CONFIG)) {
6174 6175 continue;
6175 6176 }
6176 6177
6177 6178 wwpn = (uint8_t *)&port->wwpn;
6178 6179 wwnn = (uint8_t *)&port->wwnn;
6179 6180
6180 6181 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6181 6182 "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6182 6183 "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6183 6184 wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
6184 6185 wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
6185 6186 wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
6186 6187 }
6187 6188 port = &PPORT;
6188 6189
6189 6190 /*
6190 6191 * No dependency for Restricted login parameter.
6191 6192 */
6192 6193 if ((cfg[CFG_VPORT_RESTRICTED].current) && (port->ini_mode)) {
6193 6194 port->flag |= EMLXS_PORT_RESTRICTED;
6194 6195 } else {
6195 6196 port->flag &= ~EMLXS_PORT_RESTRICTED;
6196 6197 }
6197 6198
6198 6199 /*
6199 6200 * Announce the device: ddi_report_dev() prints a banner at boot time,
6200 6201 * announcing the device pointed to by dip.
6201 6202 */
6202 6203 (void) ddi_report_dev(hba->dip);
6203 6204
6204 6205 return;
6205 6206
6206 6207 } /* emlxs_drv_banner() */
6207 6208
6208 6209
6209 6210 extern void
6210 6211 emlxs_get_fcode_version(emlxs_hba_t *hba)
6211 6212 {
6212 6213 emlxs_vpd_t *vpd = &VPD;
6213 6214 char *prop_str;
6214 6215 int status;
6215 6216
6216 6217 /* Setup fcode version property */
6217 6218 prop_str = NULL;
6218 6219 status =
6219 6220 ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
6220 6221 "fcode-version", (char **)&prop_str);
6221 6222
6222 6223 if (status == DDI_PROP_SUCCESS) {
6223 6224 bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
6224 6225 (void) ddi_prop_free((void *)prop_str);
6225 6226 } else {
6226 6227 (void) strcpy(vpd->fcode_version, "none");
6227 6228 }
6228 6229
6229 6230 return;
6230 6231
6231 6232 } /* emlxs_get_fcode_version() */
6232 6233
6233 6234
6234 6235 static int
6235 6236 emlxs_hba_attach(dev_info_t *dip)
6236 6237 {
6237 6238 emlxs_hba_t *hba;
6238 6239 emlxs_port_t *port;
6239 6240 emlxs_config_t *cfg;
6240 6241 char *prop_str;
6241 6242 int ddiinst;
6242 6243 int32_t emlxinst;
6243 6244 int status;
6244 6245 uint32_t rval;
6245 6246 uint32_t init_flag = 0;
6246 6247 char local_pm_components[32];
6247 6248 #ifdef EMLXS_I386
6248 6249 uint32_t i;
6249 6250 #endif /* EMLXS_I386 */
6250 6251
6251 6252 ddiinst = ddi_get_instance(dip);
6252 6253 emlxinst = emlxs_add_instance(ddiinst);
6253 6254
6254 6255 if (emlxinst >= MAX_FC_BRDS) {
6255 6256 cmn_err(CE_WARN,
6256 6257 "?%s: fca_hba_attach failed. Too many driver ddiinsts. "
6257 6258 "inst=%x", DRIVER_NAME, ddiinst);
6258 6259 return (DDI_FAILURE);
6259 6260 }
6260 6261
6261 6262 if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
6262 6263 return (DDI_FAILURE);
6263 6264 }
6264 6265
6265 6266 if (emlxs_device.hba[emlxinst]) {
6266 6267 return (DDI_SUCCESS);
6267 6268 }
6268 6269
6269 6270 /* An adapter can accidentally be plugged into a slave-only PCI slot */
6270 6271 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
6271 6272 cmn_err(CE_WARN,
6272 6273 "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
6273 6274 DRIVER_NAME, ddiinst);
6274 6275 return (DDI_FAILURE);
6275 6276 }
6276 6277
6277 6278 /* Allocate emlxs_dev_ctl structure. */
6278 6279 if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
6279 6280 cmn_err(CE_WARN,
6280 6281 "?%s%d: fca_hba_attach failed. Unable to allocate soft "
6281 6282 "state.", DRIVER_NAME, ddiinst);
6282 6283 return (DDI_FAILURE);
6283 6284 }
6284 6285 init_flag |= ATTACH_SOFT_STATE;
6285 6286
6286 6287 if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
6287 6288 ddiinst)) == NULL) {
6288 6289 cmn_err(CE_WARN,
6289 6290 "?%s%d: fca_hba_attach failed. Unable to get soft state.",
6290 6291 DRIVER_NAME, ddiinst);
6291 6292 goto failed;
6292 6293 }
6293 6294 bzero((char *)hba, sizeof (emlxs_hba_t));
6294 6295
6295 6296 emlxs_device.hba[emlxinst] = hba;
6296 6297 emlxs_device.log[emlxinst] = &hba->log;
6297 6298
6298 6299 #ifdef DUMP_SUPPORT
6299 6300 emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
6300 6301 emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
6301 6302 emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
6302 6303 #endif /* DUMP_SUPPORT */
6303 6304
6304 6305 hba->dip = dip;
6305 6306 hba->emlxinst = emlxinst;
6306 6307 hba->ddiinst = ddiinst;
6307 6308 hba->ini_mode = 0;
6308 6309 hba->tgt_mode = 0;
6309 6310
6310 6311 init_flag |= ATTACH_HBA;
6311 6312
6312 6313 /* Enable the physical port on this HBA */
6313 6314 port = &PPORT;
6314 6315 port->hba = hba;
6315 6316 port->vpi = 0;
6316 6317 port->flag |= EMLXS_PORT_ENABLE;
6317 6318
6318 6319 /* Allocate a transport structure */
6319 6320 hba->fca_tran =
6320 6321 (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
6321 6322 if (hba->fca_tran == NULL) {
6322 6323 cmn_err(CE_WARN,
6323 6324 "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
6324 6325 "memory.", DRIVER_NAME, ddiinst);
6325 6326 goto failed;
6326 6327 }
6327 6328 bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
6328 6329 sizeof (fc_fca_tran_t));
6329 6330
6330 6331 /*
6331 6332 * Copy the global ddi_dma_attr to the local hba fields
6332 6333 */
6333 6334 bcopy((caddr_t)&emlxs_dma_attr, (caddr_t)&hba->dma_attr,
6334 6335 sizeof (ddi_dma_attr_t));
6335 6336 bcopy((caddr_t)&emlxs_dma_attr_ro, (caddr_t)&hba->dma_attr_ro,
6336 6337 sizeof (ddi_dma_attr_t));
6337 6338 bcopy((caddr_t)&emlxs_dma_attr_1sg, (caddr_t)&hba->dma_attr_1sg,
6338 6339 sizeof (ddi_dma_attr_t));
6339 6340 bcopy((caddr_t)&emlxs_dma_attr_fcip_rsp,
6340 6341 (caddr_t)&hba->dma_attr_fcip_rsp, sizeof (ddi_dma_attr_t));
6341 6342
6342 6343 /* Reset the fca_tran dma_attr fields to the per-hba copies */
6343 6344 hba->fca_tran->fca_dma_attr = &hba->dma_attr;
6344 6345 hba->fca_tran->fca_dma_fcp_cmd_attr = &hba->dma_attr_1sg;
6345 6346 hba->fca_tran->fca_dma_fcp_rsp_attr = &hba->dma_attr_1sg;
6346 6347 hba->fca_tran->fca_dma_fcp_data_attr = &hba->dma_attr_ro;
6347 6348 hba->fca_tran->fca_dma_fcip_cmd_attr = &hba->dma_attr_1sg;
6348 6349 hba->fca_tran->fca_dma_fcip_rsp_attr = &hba->dma_attr_fcip_rsp;
6349 6350 hba->fca_tran->fca_dma_fcsm_cmd_attr = &hba->dma_attr_1sg;
6350 6351 hba->fca_tran->fca_dma_fcsm_rsp_attr = &hba->dma_attr;
6351 6352
6352 6353 /* Set the transport structure pointer in our dip */
6353 6354 /* SFS may panic if we are in target only mode */
6354 6355 /* We will update the transport structure later */
6355 6356 (void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
6356 6357 init_flag |= ATTACH_FCA_TRAN;
6357 6358
6358 6359 /* Perform driver integrity check */
6359 6360 rval = emlxs_integrity_check(hba);
6360 6361 if (rval) {
6361 6362 cmn_err(CE_WARN,
6362 6363 "?%s%d: fca_hba_attach failed. Driver integrity check "
6363 6364 "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
6364 6365 goto failed;
6365 6366 }
6366 6367
6367 6368 cfg = &CFG;
6368 6369
6369 6370 bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
6370 6371 #ifdef MSI_SUPPORT
6371 6372 if ((void *)&ddi_intr_get_supported_types != NULL) {
6372 6373 hba->intr_flags |= EMLXS_MSI_ENABLED;
6373 6374 }
6374 6375 #endif /* MSI_SUPPORT */
6375 6376
6376 6377
6377 6378 /* Create the msg log file */
6378 6379 if (emlxs_msg_log_create(hba) == 0) {
6379 6380 cmn_err(CE_WARN,
6380 6381 "?%s%d: fca_hba_attach failed. Unable to create message "
6381 6382 "log", DRIVER_NAME, ddiinst);
6382 6383 goto failed;
6383 6384
6384 6385 }
6385 6386 init_flag |= ATTACH_LOG;
6386 6387
6387 6388 /* We can begin to use EMLXS_MSGF from this point on */
6388 6389
6389 6390 /*
6390 6391 * Find the I/O bus type. If it is not an SBUS card,
6391 6392 * then it is a PCI card. Default is PCI_FC (0).
6392 6393 */
6393 6394 prop_str = NULL;
6394 6395 status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
6395 6396 (dev_info_t *)dip, 0, "name", (char **)&prop_str);
6396 6397
6397 6398 if (status == DDI_PROP_SUCCESS) {
6398 6399 if (strncmp(prop_str, "lpfs", 4) == 0) {
6399 6400 hba->bus_type = SBUS_FC;
6400 6401 }
6401 6402
6402 6403 (void) ddi_prop_free((void *)prop_str);
6403 6404 }
6404 6405
6405 6406 /*
6406 6407 * Copy DDS from the config method and update configuration parameters
6407 6408 */
6408 6409 (void) emlxs_get_props(hba);
6409 6410
6410 6411 #ifdef FMA_SUPPORT
6411 6412 hba->fm_caps = cfg[CFG_FM_CAPS].current;
6412 6413
6413 6414 emlxs_fm_init(hba);
6414 6415
6415 6416 init_flag |= ATTACH_FM;
6416 6417 #endif /* FMA_SUPPORT */
6417 6418
6418 6419 if (emlxs_map_bus(hba)) {
6419 6420 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6420 6421 "Unable to map memory");
6421 6422 goto failed;
6422 6423
6423 6424 }
6424 6425 init_flag |= ATTACH_MAP_BUS;
6425 6426
6426 6427 /* Attempt to identify the adapter */
6427 6428 rval = emlxs_init_adapter_info(hba);
6428 6429
6429 6430 if (rval == 0) {
6430 6431 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6431 6432 "Unable to get adapter info. Id:%d Device id:0x%x "
6432 6433 "Model:%s", hba->model_info.id,
6433 6434 hba->model_info.device_id, hba->model_info.model);
6434 6435 goto failed;
6435 6436 }
6436 6437 #define FILTER_ORACLE_BRANDED
6437 6438 #ifdef FILTER_ORACLE_BRANDED
6438 6439
6439 6440 /* Sun-branded adapters are not supported */
6440 6441 if (hba->model_info.flags & EMLXS_SUN_BRANDED) {
6441 6442 hba->model_info.flags |= EMLXS_NOT_SUPPORTED;
6442 6443 }
6443 6444 #endif /* FILTER_ORACLE_BRANDED */
6444 6445
6445 6446 /* Check if adapter is not supported */
6446 6447 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
6447 6448 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6448 6449 "Unsupported adapter found. Id:%d Device id:0x%x "
6449 6450 "SSDID:0x%x Model:%s", hba->model_info.id,
6450 6451 hba->model_info.device_id,
6451 6452 hba->model_info.ssdid, hba->model_info.model);
6452 6453 goto failed;
6453 6454 }
6454 6455 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
6455 6456 hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE;
6456 6457 #ifdef EMLXS_I386
6457 6458 /*
6458 6459 * TigerShark has 64K limit for SG element size
6459 6460 * Do this for x86 alone. For SPARC, the driver
6460 6461 * breaks up the single SGE later on.
6461 6462 */
6462 6463 hba->dma_attr_ro.dma_attr_count_max = 0xffff;
6463 6464
6464 6465 i = cfg[CFG_MAX_XFER_SIZE].current;
6465 6466 /* Update SGL size based on max_xfer_size */
6466 6467 if (i > 688128) {
6467 6468 /* 688128 = (((2048 / 12) - 2) * 4096) */
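			/*
			 * Reading of the formula above (an interpretation,
			 * not from the original source): each scatter-gather
			 * entry is treated as 12 bytes, so a 2048-byte list
			 * holds 2048 / 12 = 170 entries; two are reserved,
			 * leaving 168 entries, each mapping a 4096-byte page:
			 * 168 * 4096 = 688128 bytes of maximum transfer.
			 */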
6468 6469 hba->sli.sli4.mem_sgl_size = 4096;
6469 6470 } else if (i > 339968) {
6470 6471 /* 339968 = (((1024 / 12) - 2) * 4096) */
6471 6472 hba->sli.sli4.mem_sgl_size = 2048;
6472 6473 } else {
6473 6474 hba->sli.sli4.mem_sgl_size = 1024;
6474 6475 }
6475 6476 i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size);
6476 6477 #endif /* EMLXS_I386 */
6477 6478 } else {
6478 6479 hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE;
6479 6480 #ifdef EMLXS_I386
6480 6481 i = cfg[CFG_MAX_XFER_SIZE].current;
6481 6482 /* Update BPL size based on max_xfer_size */
6482 6483 if (i > 688128) {
6483 6484 /* 688128 = (((2048 / 12) - 2) * 4096) */
6484 6485 hba->sli.sli3.mem_bpl_size = 4096;
6485 6486 } else if (i > 339968) {
6486 6487 /* 339968 = (((1024 / 12) - 2) * 4096) */
6487 6488 hba->sli.sli3.mem_bpl_size = 2048;
6488 6489 } else {
6489 6490 hba->sli.sli3.mem_bpl_size = 1024;
6490 6491 }
6491 6492 i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size);
6492 6493 #endif /* EMLXS_I386 */
6493 6494 }
6494 6495
6495 6496 #ifdef EMLXS_I386
6496 6497 /* Update dma_attr_sgllen based on BPL size */
6497 6498 hba->dma_attr.dma_attr_sgllen = i;
6498 6499 hba->dma_attr_ro.dma_attr_sgllen = i;
6499 6500 hba->dma_attr_fcip_rsp.dma_attr_sgllen = i;
6500 6501 #endif /* EMLXS_I386 */
6501 6502
6502 6503 if (EMLXS_SLI_MAP_HDW(hba)) {
6503 6504 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6504 6505 "Unable to map memory");
6505 6506 goto failed;
6506 6507
6507 6508 }
6508 6509 init_flag |= ATTACH_MAP_SLI;
6509 6510
6510 6511 /* Initialize the interrupts. But don't add them yet */
6511 6512 status = EMLXS_INTR_INIT(hba, 0);
6512 6513 if (status != DDI_SUCCESS) {
6513 6514 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6514 6515 "Unable to initalize interrupt(s).");
6515 6516 goto failed;
6516 6517
6517 6518 }
6518 6519 init_flag |= ATTACH_INTR_INIT;
6519 6520
6520 6521 /* Initialize LOCKs */
6521 6522 emlxs_msg_lock_reinit(hba);
6522 6523 emlxs_lock_init(hba);
6523 6524 init_flag |= ATTACH_LOCK;
6524 6525
6525 6526 /* Create the event queue */
6526 6527 if (emlxs_event_queue_create(hba) == 0) {
6527 6528 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6528 6529 "Unable to create event queue");
6529 6530
6530 6531 goto failed;
6531 6532
6532 6533 }
6533 6534 init_flag |= ATTACH_EVENTS;
6534 6535
6535 6536 /* Initialize the power management */
6536 6537 mutex_enter(&EMLXS_PM_LOCK);
6537 6538 hba->pm_state = EMLXS_PM_IN_ATTACH;
6538 6539 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
6539 6540 hba->pm_busy = 0;
6540 6541 #ifdef IDLE_TIMER
6541 6542 hba->pm_active = 1;
6542 6543 hba->pm_idle_timer = 0;
6543 6544 #endif /* IDLE_TIMER */
6544 6545 mutex_exit(&EMLXS_PM_LOCK);
6545 6546
6546 6547 /* Set the pm component name */
6547 6548 (void) sprintf(local_pm_components, "NAME=%s%d", DRIVER_NAME,
6548 6549 ddiinst);
6549 6550 emlxs_pm_components[0] = local_pm_components;
6550 6551
6551 6552 /* Check if power management support is enabled */
6552 6553 if (cfg[CFG_PM_SUPPORT].current) {
6553 6554 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
6554 6555 "pm-components", emlxs_pm_components,
6555 6556 sizeof (emlxs_pm_components) /
6556 6557 sizeof (emlxs_pm_components[0])) !=
6557 6558 DDI_PROP_SUCCESS) {
6558 6559 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6559 6560 "Unable to create pm components.");
6560 6561 goto failed;
6561 6562 }
6562 6563 }
6563 6564
6564 6565 /* Needed for suspend and resume support */
6565 6566 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state",
6566 6567 "needs-suspend-resume");
6567 6568 init_flag |= ATTACH_PROP;
6568 6569
6569 6570 emlxs_thread_spawn_create(hba);
6570 6571 init_flag |= ATTACH_SPAWN;
6571 6572
6572 6573 emlxs_thread_create(hba, &hba->iodone_thread);
6573 6574
6574 6575 init_flag |= ATTACH_THREAD;
6575 6576
6576 6577 /* Setup initiator / target ports */
6577 6578 emlxs_set_mode(hba);
6578 6579
6579 6580 /* If driver did not attach to either stack, */
6580 6581 /* then driver attach failed */
6581 6582 if (!hba->tgt_mode && !hba->ini_mode) {
6582 6583 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6583 6584 "Driver interfaces not enabled.");
6584 6585 goto failed;
6585 6586 }
6586 6587
6587 6588 /*
6588 6589 * Initialize HBA
6589 6590 */
6590 6591
6591 6592 /* Set initial state */
6592 6593 mutex_enter(&EMLXS_PORT_LOCK);
6593 6594 emlxs_diag_state = DDI_OFFDI;
6594 6595 hba->flag |= FC_OFFLINE_MODE;
6595 6596 hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
6596 6597 mutex_exit(&EMLXS_PORT_LOCK);
6597 6598
6598 6599 if (status = emlxs_online(hba)) {
6599 6600 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6600 6601 "Unable to initialize adapter.");
6601 6602 goto failed;
6602 6603 }
6603 6604 init_flag |= ATTACH_ONLINE;
6604 6605
6605 6606 /* This is to ensure that the model property is properly set */
6606 6607 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
6607 6608 hba->model_info.model);
6608 6609
6609 6610 /* Create the device node. */
6610 6611 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
6611 6612 DDI_FAILURE) {
6612 6613 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
6613 6614 "Unable to create device node.");
6614 6615 goto failed;
6615 6616 }
6616 6617 init_flag |= ATTACH_NODE;
6617 6618
6618 6619 /* Attach initiator now */
6619 6620 /* This must come after emlxs_online() */
6620 6621 emlxs_fca_attach(hba);
6621 6622 init_flag |= ATTACH_FCA;
6622 6623
6623 6624 /* Initialize kstat information */
6624 6625 hba->kstat = kstat_create(DRIVER_NAME,
6625 6626 ddiinst, "statistics", "controller",
6626 6627 KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
6627 6628 KSTAT_FLAG_VIRTUAL);
6628 6629
6629 6630 if (hba->kstat == NULL) {
6630 6631 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6631 6632 "kstat_create failed.");
6632 6633 } else {
6633 6634 hba->kstat->ks_data = (void *)&hba->stats;
6634 6635 kstat_install(hba->kstat);
6635 6636 init_flag |= ATTACH_KSTAT;
6636 6637 }
6637 6638
6638 6639 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
6639 6640 /* Setup virtual port properties */
6640 6641 emlxs_read_vport_prop(hba);
6641 6642 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */
6642 6643
6643 6644
6644 6645 #ifdef DHCHAP_SUPPORT
6645 6646 emlxs_dhc_attach(hba);
6646 6647 init_flag |= ATTACH_DHCHAP;
6647 6648 #endif /* DHCHAP_SUPPORT */
6648 6649
6649 6650 /* Display the driver banner now */
6650 6651 emlxs_drv_banner(hba);
6651 6652
6652 6653 /* Raise the power level */
6653 6654
6654 6655 /*
6655 6656 * This will not execute emlxs_hba_resume because
6656 6657 * EMLXS_PM_IN_ATTACH is set
6657 6658 */
6658 6659 if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
6659 6660 /* Set power up anyway. This should not happen! */
6660 6661 mutex_enter(&EMLXS_PM_LOCK);
6661 6662 hba->pm_level = EMLXS_PM_ADAPTER_UP;
6662 6663 hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
6663 6664 mutex_exit(&EMLXS_PM_LOCK);
6664 6665 } else {
6665 6666 mutex_enter(&EMLXS_PM_LOCK);
6666 6667 hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
6667 6668 mutex_exit(&EMLXS_PM_LOCK);
6668 6669 }
6669 6670
6670 6671 #ifdef SFCT_SUPPORT
6671 6672 /* Do this last */
6672 6673 emlxs_fct_attach(hba);
6673 6674 init_flag |= ATTACH_FCT;
6674 6675 #endif /* SFCT_SUPPORT */
6675 6676
6676 6677 return (DDI_SUCCESS);
6677 6678
6678 6679 failed:
6679 6680
6680 6681 emlxs_driver_remove(dip, init_flag, 1);
6681 6682
6682 6683 return (DDI_FAILURE);
6683 6684
6684 6685 } /* emlxs_hba_attach() */
6685 6686
6686 6687
6687 6688 static int
6688 6689 emlxs_hba_detach(dev_info_t *dip)
6689 6690 {
6690 6691 emlxs_hba_t *hba;
6691 6692 emlxs_port_t *port;
6692 6693 int ddiinst;
6693 6694 int count;
6694 6695 uint32_t init_flag = (uint32_t)-1;
6695 6696
6696 6697 ddiinst = ddi_get_instance(dip);
6697 6698 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6698 6699 port = &PPORT;
6699 6700
6700 6701 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
6701 6702
6702 6703 mutex_enter(&EMLXS_PM_LOCK);
6703 6704 hba->pm_state |= EMLXS_PM_IN_DETACH;
6704 6705 mutex_exit(&EMLXS_PM_LOCK);
6705 6706
6706 6707 /* Lower the power level */
6707 6708 /*
6708 6709 * This will not suspend the driver since the
6709 6710 * EMLXS_PM_IN_DETACH has been set
6710 6711 */
6711 6712 if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
6712 6713 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6713 6714 "Unable to lower power.");
6714 6715
6715 6716 mutex_enter(&EMLXS_PM_LOCK);
6716 6717 hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6717 6718 mutex_exit(&EMLXS_PM_LOCK);
6718 6719
6719 6720 return (DDI_FAILURE);
6720 6721 }
6721 6722
6722 6723 /* Take the adapter offline first, if not already */
6723 6724 if (emlxs_offline(hba) != 0) {
6724 6725 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
6725 6726 "Unable to take adapter offline.");
6726 6727
6727 6728 mutex_enter(&EMLXS_PM_LOCK);
6728 6729 hba->pm_state &= ~EMLXS_PM_IN_DETACH;
6729 6730 mutex_exit(&EMLXS_PM_LOCK);
6730 6731
6731 6732 (void) emlxs_pm_raise_power(dip);
6732 6733
6733 6734 return (DDI_FAILURE);
6734 6735 }
6735 6736 /* Check ub buffer pools */
6736 6737 if (port->ub_pool) {
6737 6738 mutex_enter(&EMLXS_UB_LOCK);
6738 6739
6739 6740 /* Wait up to 10 seconds for all ub pools to be freed */
6740 6741 count = 10 * 2;
6741 6742 while (port->ub_pool && count) {
6742 6743 mutex_exit(&EMLXS_UB_LOCK);
6743 6744 delay(drv_usectohz(500000)); /* half second wait */
6744 6745 count--;
6745 6746 mutex_enter(&EMLXS_UB_LOCK);
6746 6747 }
6747 6748
6748 6749 if (port->ub_pool) {
6749 6750 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6750 6751 "fca_unbind_port: Unsolicited buffers still "
6751 6752 "active. port=%p. Destroying...", port);
6752 6753
6753 6754 /* Destroy all pools */
6754 6755 while (port->ub_pool) {
6755 6756 emlxs_ub_destroy(port, port->ub_pool);
6756 6757 }
6757 6758 }
6758 6759
6759 6760 mutex_exit(&EMLXS_UB_LOCK);
6760 6761 }
6761 6762 init_flag &= ~ATTACH_ONLINE;
6762 6763
6763 6764 /* Remove the driver instance */
6764 6765 emlxs_driver_remove(dip, init_flag, 0);
6765 6766
6766 6767 return (DDI_SUCCESS);
6767 6768
6768 6769 } /* emlxs_hba_detach() */
6769 6770
6770 6771
6771 6772 extern int
6772 6773 emlxs_map_bus(emlxs_hba_t *hba)
6773 6774 {
6774 6775 emlxs_port_t *port = &PPORT;
6775 6776 dev_info_t *dip;
6776 6777 ddi_device_acc_attr_t dev_attr;
6777 6778 int status;
6778 6779
6779 6780 dip = (dev_info_t *)hba->dip;
6780 6781 dev_attr = emlxs_dev_acc_attr;
6781 6782
6782 6783 if (hba->bus_type == SBUS_FC) {
6783 6784 if (hba->pci_acc_handle == 0) {
6784 6785 status = ddi_regs_map_setup(dip,
6785 6786 SBUS_DFLY_PCI_CFG_RINDEX,
6786 6787 (caddr_t *)&hba->pci_addr,
6787 6788 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6788 6789 if (status != DDI_SUCCESS) {
6789 6790 EMLXS_MSGF(EMLXS_CONTEXT,
6790 6791 &emlxs_attach_failed_msg,
6791 6792 "(SBUS) ddi_regs_map_setup PCI failed. "
6792 6793 "status=%x", status);
6793 6794 goto failed;
6794 6795 }
6795 6796 }
6796 6797
6797 6798 if (hba->sbus_pci_handle == 0) {
6798 6799 status = ddi_regs_map_setup(dip,
6799 6800 SBUS_TITAN_PCI_CFG_RINDEX,
6800 6801 (caddr_t *)&hba->sbus_pci_addr,
6801 6802 0, 0, &dev_attr, &hba->sbus_pci_handle);
6802 6803 if (status != DDI_SUCCESS) {
6803 6804 EMLXS_MSGF(EMLXS_CONTEXT,
6804 6805 &emlxs_attach_failed_msg,
6805 6806 "(SBUS) ddi_regs_map_setup TITAN PCI "
6806 6807 "failed. status=%x", status);
6807 6808 goto failed;
6808 6809 }
6809 6810 }
6810 6811
6811 6812 } else { /* ****** PCI ****** */
6812 6813
6813 6814 if (hba->pci_acc_handle == 0) {
6814 6815 status = ddi_regs_map_setup(dip,
6815 6816 PCI_CFG_RINDEX,
6816 6817 (caddr_t *)&hba->pci_addr,
6817 6818 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
6818 6819 if (status != DDI_SUCCESS) {
6819 6820 EMLXS_MSGF(EMLXS_CONTEXT,
6820 6821 &emlxs_attach_failed_msg,
6821 6822 "(PCI) ddi_regs_map_setup PCI failed. "
6822 6823 "status=%x", status);
6823 6824 goto failed;
6824 6825 }
6825 6826 }
6826 6827 #ifdef EMLXS_I386
6827 6828 /* Set up PCI config space */
6828 6829 (void) ddi_put16(hba->pci_acc_handle,
6829 6830 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
6830 6831 CMD_CFG_VALUE | CMD_IO_ENBL);
6831 6832
6832 6833 #ifdef FMA_SUPPORT
6833 6834 if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
6834 6835 != DDI_FM_OK) {
6835 6836 EMLXS_MSGF(EMLXS_CONTEXT,
6836 6837 &emlxs_invalid_access_handle_msg, NULL);
6837 6838 goto failed;
6838 6839 }
6839 6840 #endif /* FMA_SUPPORT */
6840 6841
6841 6842 #endif /* EMLXS_I386 */
6842 6843
6843 6844 }
6844 6845 return (0);
6845 6846
6846 6847 failed:
6847 6848
6848 6849 emlxs_unmap_bus(hba);
6849 6850 return (ENOMEM);
6850 6851
6851 6852 } /* emlxs_map_bus() */
6852 6853
6853 6854
6854 6855 extern void
6855 6856 emlxs_unmap_bus(emlxs_hba_t *hba)
6856 6857 {
6857 6858 if (hba->pci_acc_handle) {
6858 6859 (void) ddi_regs_map_free(&hba->pci_acc_handle);
6859 6860 hba->pci_acc_handle = 0;
6860 6861 }
6861 6862
6862 6863 if (hba->sbus_pci_handle) {
6863 6864 (void) ddi_regs_map_free(&hba->sbus_pci_handle);
6864 6865 hba->sbus_pci_handle = 0;
6865 6866 }
6866 6867
6867 6868 return;
6868 6869
6869 6870 } /* emlxs_unmap_bus() */
6870 6871
6871 6872
6872 6873 static int
6873 6874 emlxs_get_props(emlxs_hba_t *hba)
6874 6875 {
6875 6876 emlxs_config_t *cfg;
6876 6877 uint32_t i;
6877 6878 char string[256];
6878 6879 uint32_t new_value;
6879 6880
6880 6881 /* Initialize each parameter */
6881 6882 for (i = 0; i < NUM_CFG_PARAM; i++) {
6882 6883 cfg = &hba->config[i];
6883 6884
6884 6885 /* Ensure strings are terminated */
6885 6886 cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0;
6886 6887 cfg->help[(EMLXS_CFG_HELP_SIZE-1)] = 0;
6887 6888
6888 6889 /* Set the current value to the default value */
6889 6890 new_value = cfg->def;
6890 6891
6891 6892 /* First check for the global setting */
6892 6893 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6893 6894 (void *)hba->dip, DDI_PROP_DONTPASS,
6894 6895 cfg->string, new_value);
6895 6896
6896 6897 /* Now check for the per adapter ddiinst setting */
6897 6898 (void) sprintf(string, "%s%d-%s", DRIVER_NAME, hba->ddiinst,
6898 6899 cfg->string);
6899 6900
6900 6901 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6901 6902 (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value);
6902 6903
6903 6904 /* Now check the parameter */
6904 6905 cfg->current = emlxs_check_parm(hba, i, new_value);
6905 6906 }
6906 6907
6907 6908 return (0);
6908 6909
6909 6910 } /* emlxs_get_props() */
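The two lookups above give each parameter both a global and a per-instance form: the bare property name applies to all adapters, while a name built as "%s%d-%s" (driver name, instance, parameter) overrides it for one adapter, and because it is read second it wins. In the driver's .conf file this would look something like a global link-speed=0; entry alongside a hypothetical emlxs0-link-speed=4; entry for instance 0 (assuming DRIVER_NAME expands to "emlxs", which this section does not spell out).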
6910 6911
6911 6912
6912 6913 extern uint32_t
6913 6914 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
6914 6915 {
6915 6916 emlxs_port_t *port = &PPORT;
6916 6917 uint32_t i;
6917 6918 emlxs_config_t *cfg;
6918 6919 emlxs_vpd_t *vpd = &VPD;
6919 6920
6920 6921 if (index >= NUM_CFG_PARAM) {
6921 6922 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
6922 6923 "emlxs_check_parm failed. Invalid index = %d", index);
6923 6924
6924 6925 return (new_value);
6925 6926 }
6926 6927
6927 6928 cfg = &hba->config[index];
6928 6929
6929 6930 if (new_value > cfg->hi) {
6930 6931 new_value = cfg->def;
6931 6932 } else if (new_value < cfg->low) {
6932 6933 new_value = cfg->def;
6933 6934 }
6934 6935
6935 6936 /* Perform additional checks */
6936 6937 switch (index) {
6937 6938 case CFG_NPIV_ENABLE:
6938 6939 if (hba->tgt_mode) {
6939 6940 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6940 6941 "enable-npiv: Not supported in target mode. "
6941 6942 "Disabling.");
6942 6943
6943 6944 new_value = 0;
6944 6945 }
6945 6946 break;
6946 6947
6947 6948 #ifdef DHCHAP_SUPPORT
6948 6949 case CFG_AUTH_ENABLE:
6949 6950 if (hba->tgt_mode) {
6950 6951 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
6951 6952 "enable-auth: Not supported in target mode. "
6952 6953 "Disabling.");
6953 6954
6954 6955 new_value = 0;
6955 6956 }
6956 6957 break;
6957 6958 #endif /* DHCHAP_SUPPORT */
6958 6959
6959 6960 case CFG_NUM_NODES:
6960 6961 switch (new_value) {
6961 6962 case 1:
6962 6963 case 2:
6963 6964 /* Must have at least 3 if not 0 */
6964 6965 return (3);
6965 6966
6966 6967 default:
6967 6968 break;
6968 6969 }
6969 6970 break;
6970 6971
6971 6972 case CFG_FW_CHECK:
6972 6973 /* The 0x2 bit implies the 0x1 bit will also be set */
6973 6974 if (new_value & 0x2) {
6974 6975 new_value |= 0x1;
6975 6976 }
6976 6977
6977 6978 /* The 0x4 bit should not be set if 0x1 or 0x2 is not set */
6978 6979 if (!(new_value & 0x3) && (new_value & 0x4)) {
6979 6980 new_value &= ~0x4;
6980 6981 }
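		/*
		 * Worked example of the normalization above: a setting of
		 * 0x6 gains the implied 0x1 bit and becomes 0x7, while a
		 * bare 0x4 has no 0x1/0x2 bit to pair with and collapses
		 * to 0x0.
		 */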
6981 6982 break;
6982 6983
6983 6984 case CFG_LINK_SPEED:
6984 6985 if (vpd->link_speed) {
6985 6986 switch (new_value) {
6986 6987 case 0:
6987 6988 break;
6988 6989
6989 6990 case 1:
6990 6991 if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
6991 6992 new_value = 0;
6992 6993
6993 6994 EMLXS_MSGF(EMLXS_CONTEXT,
6994 6995 &emlxs_init_msg,
6995 6996 "link-speed: 1Gb not supported "
6996 6997 "by adapter. Switching to auto "
6997 6998 "detect.");
6998 6999 }
6999 7000 break;
7000 7001
7001 7002 case 2:
7002 7003 if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
7003 7004 new_value = 0;
7004 7005
7005 7006 EMLXS_MSGF(EMLXS_CONTEXT,
7006 7007 &emlxs_init_msg,
7007 7008 "link-speed: 2Gb not supported "
7008 7009 "by adapter. Switching to auto "
7009 7010 "detect.");
7010 7011 }
7011 7012 break;
7012 7013 case 4:
7013 7014 if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
7014 7015 new_value = 0;
7015 7016
7016 7017 EMLXS_MSGF(EMLXS_CONTEXT,
7017 7018 &emlxs_init_msg,
7018 7019 "link-speed: 4Gb not supported "
7019 7020 "by adapter. Switching to auto "
7020 7021 "detect.");
7021 7022 }
7022 7023 break;
7023 7024
7024 7025 case 8:
7025 7026 if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
7026 7027 new_value = 0;
7027 7028
7028 7029 EMLXS_MSGF(EMLXS_CONTEXT,
7029 7030 &emlxs_init_msg,
7030 7031 "link-speed: 8Gb not supported "
7031 7032 "by adapter. Switching to auto "
7032 7033 "detect.");
7033 7034 }
7034 7035 break;
7035 7036
7036 7037 case 10:
7037 7038 if (!(vpd->link_speed & LMT_10GB_CAPABLE)) {
7038 7039 new_value = 0;
7039 7040
7040 7041 EMLXS_MSGF(EMLXS_CONTEXT,
7041 7042 &emlxs_init_msg,
7042 7043 "link-speed: 10Gb not supported "
7043 7044 "by adapter. Switching to auto "
7044 7045 "detect.");
7045 7046 }
7046 7047 break;
7047 7048
7048 7049 default:
7049 7050 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7050 7051 "link-speed: Invalid value=%d provided. "
7051 7052 "Switching to auto detect.",
7052 7053 new_value);
7053 7054
7054 7055 new_value = 0;
7055 7056 }
7056 7057 } else { /* Perform basic validity check */
7057 7058
7058 7059 /* Perform additional check on link speed */
7059 7060 switch (new_value) {
7060 7061 case 0:
7061 7062 case 1:
7062 7063 case 2:
7063 7064 case 4:
7064 7065 case 8:
7065 7066 case 10:
7066 7067 /* link-speed is a valid choice */
7067 7068 break;
7068 7069
7069 7070 default:
7070 7071 new_value = cfg->def;
7071 7072 }
7072 7073 }
7073 7074 break;
7074 7075
7075 7076 case CFG_TOPOLOGY:
7076 7077 /* Perform additional check on topology */
7077 7078 switch (new_value) {
7078 7079 case 0:
7079 7080 case 2:
7080 7081 case 4:
7081 7082 case 6:
7082 7083 /* topology is a valid choice */
7083 7084 break;
7084 7085
7085 7086 default:
7086 7087 return (cfg->def);
7087 7088 }
7088 7089 break;
7089 7090
7090 7091 #ifdef DHCHAP_SUPPORT
7091 7092 case CFG_AUTH_TYPE:
7092 7093 {
7093 7094 uint32_t shift;
7094 7095 uint32_t mask;
7095 7096
7096 7097 /* Perform additional check on auth type */
7097 7098 shift = 12;
7098 7099 mask = 0xF000;
7099 7100 for (i = 0; i < 4; i++) {
7100 7101 if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) {
7101 7102 return (cfg->def);
7102 7103 }
7103 7104
7104 7105 shift -= 4;
7105 7106 mask >>= 4;
7106 7107 }
7107 7108 break;
7108 7109 }
7109 7110
7110 7111 case CFG_AUTH_HASH:
7111 7112 {
7112 7113 uint32_t shift;
7113 7114 uint32_t mask;
7114 7115
7115 7116 /* Perform additional check on auth hash */
7116 7117 shift = 12;
7117 7118 mask = 0xF000;
7118 7119 for (i = 0; i < 4; i++) {
7119 7120 if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) {
7120 7121 return (cfg->def);
7121 7122 }
7122 7123
7123 7124 shift -= 4;
7124 7125 mask >>= 4;
7125 7126 }
7126 7127 break;
7127 7128 }
7128 7129
7129 7130 case CFG_AUTH_GROUP:
7130 7131 {
7131 7132 uint32_t shift;
7132 7133 uint32_t mask;
7133 7134
7134 7135 /* Perform additional check on auth group */
7135 7136 shift = 28;
7136 7137 mask = 0xF0000000;
7137 7138 for (i = 0; i < 8; i++) {
7138 7139 if (((new_value & mask) >> shift) >
7139 7140 DFC_AUTH_GROUP_MAX) {
7140 7141 return (cfg->def);
7141 7142 }
7142 7143
7143 7144 shift -= 4;
7144 7145 mask >>= 4;
7145 7146 }
7146 7147 break;
7147 7148 }
7148 7149
7149 7150 case CFG_AUTH_INTERVAL:
7150 7151 if (new_value < 10) {
7151 7152 return (10);
7152 7153 }
7153 7154 break;
7154 7155
7155 7156
7156 7157 #endif /* DHCHAP_SUPPORT */
7157 7158
7158 7159 } /* switch */
7159 7160
7160 7161 return (new_value);
7161 7162
7162 7163 } /* emlxs_check_parm() */
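
The DHCHAP cases above validate values that pack one priority per 4-bit nibble (four nibbles for auth type and hash, eight for DH group). A minimal standalone sketch of the same nibble walk, using a hypothetical maximum of 2 per nibble in place of DFC_AUTH_TYPE_MAX, whose actual value is defined elsewhere:

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Return 1 when every 4-bit nibble of a 16-bit packed priority
     * list is <= max (mirroring the CFG_AUTH_TYPE/CFG_AUTH_HASH
     * checks above), 0 when any nibble is out of range.
     */
    static int
    nibbles_valid(uint32_t value, uint32_t max)
    {
            uint32_t shift = 12;
            uint32_t mask = 0xF000;
            int i;

            for (i = 0; i < 4; i++) {
                    if (((value & mask) >> shift) > max)
                            return (0);
                    shift -= 4;
                    mask >>= 4;
            }
            return (1);
    }

    int
    main(void)
    {
            (void) printf("0x1200: %d\n", nibbles_valid(0x1200, 2)); /* 1 */
            (void) printf("0x1290: %d\n", nibbles_valid(0x1290, 2)); /* 0 */
            return (0);
    }
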
7163 7164
7164 7165
7165 7166 extern uint32_t
7166 7167 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
7167 7168 {
7168 7169 emlxs_port_t *port = &PPORT;
7169 7170 emlxs_port_t *vport;
7170 7171 uint32_t vpi;
7171 7172 emlxs_config_t *cfg;
7172 7173 uint32_t old_value;
7173 7174
7174 7175 if (index > NUM_CFG_PARAM) {
7175 7176 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7176 7177 "emlxs_set_parm failed. Invalid index = %d", index);
7177 7178
7178 7179 return ((uint32_t)FC_FAILURE);
7179 7180 }
7180 7181
7181 7182 cfg = &hba->config[index];
7182 7183
7183 7184 if (!(cfg->flags & PARM_DYNAMIC)) {
7184 7185 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7185 7186 "emlxs_set_parm failed. %s is not dynamic.", cfg->string);
7186 7187
7187 7188 return ((uint32_t)FC_FAILURE);
7188 7189 }
7189 7190
7190 7191 /* Check new value */
7191 7192 old_value = new_value;
7192 7193 new_value = emlxs_check_parm(hba, index, new_value);
7193 7194
7194 7195 if (old_value != new_value) {
7195 7196 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7196 7197 "emlxs_set_parm: %s invalid. 0x%x --> 0x%x",
7197 7198 cfg->string, old_value, new_value);
7198 7199 }
7199 7200
7200 7201 /* Return now if no actual change */
7201 7202 if (new_value == cfg->current) {
7202 7203 return (FC_SUCCESS);
7203 7204 }
7204 7205
7205 7206 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7206 7207 "emlxs_set_parm: %s changing. 0x%x --> 0x%x",
7207 7208 cfg->string, cfg->current, new_value);
7208 7209
7209 7210 old_value = cfg->current;
7210 7211 cfg->current = new_value;
7211 7212
7212 7213 /* React to change if needed */
7213 7214 switch (index) {
7214 7215
7215 7216 case CFG_PCI_MAX_READ:
7216 7217 /* Update MXR */
7217 7218 emlxs_pcix_mxr_update(hba, 1);
7218 7219 break;
7219 7220
7220 7221 case CFG_SLI_MODE:
7221 7222 /* Check SLI mode */
7222 7223 if ((hba->sli_mode == 3) && (new_value == 2)) {
7223 7224 /* All vports must be disabled first */
7224 7225 for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
7225 7226 vport = &VPORT(vpi);
7226 7227
7227 7228 if (vport->flag & EMLXS_PORT_ENABLE) {
7228 7229 /* Reset current value */
7229 7230 cfg->current = old_value;
7230 7231
7231 7232 EMLXS_MSGF(EMLXS_CONTEXT,
7232 7233 &emlxs_sfs_debug_msg,
7233 7234 "emlxs_set_parm failed. %s: vpi=%d "
7234 7235 "still enabled. Value restored to "
7235 7236 "0x%x.", cfg->string, vpi,
7236 7237 old_value);
7237 7238
7238 7239 return (2);
7239 7240 }
7240 7241 }
7241 7242 }
7242 7243 break;
7243 7244
7244 7245 case CFG_NPIV_ENABLE:
7245 7246 /* Check if NPIV is being disabled */
7246 7247 if ((old_value == 1) && (new_value == 0)) {
7247 7248 /* All vports must be disabled first */
7248 7249 for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
7249 7250 vport = &VPORT(vpi);
7250 7251
7251 7252 if (vport->flag & EMLXS_PORT_ENABLE) {
7252 7253 /* Reset current value */
7253 7254 cfg->current = old_value;
7254 7255
7255 7256 EMLXS_MSGF(EMLXS_CONTEXT,
7256 7257 &emlxs_sfs_debug_msg,
7257 7258 "emlxs_set_parm failed. %s: vpi=%d "
7258 7259 "still enabled. Value restored to "
7259 7260 "0x%x.", cfg->string, vpi,
7260 7261 old_value);
7261 7262
7262 7263 return (2);
7263 7264 }
7264 7265 }
7265 7266 }
7266 7267
7267 7268 /* Trigger adapter reset */
7268 7269 /* (void) emlxs_reset(port, FC_FCA_RESET); */
7269 7270
7270 7271 break;
7271 7272
7272 7273
7273 7274 case CFG_VPORT_RESTRICTED:
7274 7275 for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
7275 7276 vport = &VPORT(vpi);
7276 7277
7277 7278 if (!(vport->flag & EMLXS_PORT_CONFIG)) {
7278 7279 continue;
7279 7280 }
7280 7281
7281 7282 if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
7282 7283 continue;
7283 7284 }
7284 7285
7285 7286 if (new_value) {
7286 7287 vport->flag |= EMLXS_PORT_RESTRICTED;
7287 7288 } else {
7288 7289 vport->flag &= ~EMLXS_PORT_RESTRICTED;
7289 7290 }
7290 7291 }
7291 7292
7292 7293 break;
7293 7294
7294 7295 #ifdef DHCHAP_SUPPORT
7295 7296 case CFG_AUTH_ENABLE:
7296 7297 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
7297 7298 break;
7298 7299
7299 7300 case CFG_AUTH_TMO:
7300 7301 hba->auth_cfg.authentication_timeout = cfg->current;
7301 7302 break;
7302 7303
7303 7304 case CFG_AUTH_MODE:
7304 7305 hba->auth_cfg.authentication_mode = cfg->current;
7305 7306 break;
7306 7307
7307 7308 case CFG_AUTH_BIDIR:
7308 7309 hba->auth_cfg.bidirectional = cfg->current;
7309 7310 break;
7310 7311
7311 7312 case CFG_AUTH_TYPE:
7312 7313 hba->auth_cfg.authentication_type_priority[0] =
7313 7314 (cfg->current & 0xF000) >> 12;
7314 7315 hba->auth_cfg.authentication_type_priority[1] =
7315 7316 (cfg->current & 0x0F00) >> 8;
7316 7317 hba->auth_cfg.authentication_type_priority[2] =
7317 7318 (cfg->current & 0x00F0) >> 4;
7318 7319 hba->auth_cfg.authentication_type_priority[3] =
7319 7320 (cfg->current & 0x000F);
7320 7321 break;
7321 7322
7322 7323 case CFG_AUTH_HASH:
7323 7324 hba->auth_cfg.hash_priority[0] =
7324 7325 (cfg->current & 0xF000) >> 12;
7325 7326 hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8;
7326 7327 hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4;
7327 7328 hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
7328 7329 break;
7329 7330
7330 7331 case CFG_AUTH_GROUP:
7331 7332 hba->auth_cfg.dh_group_priority[0] =
7332 7333 (cfg->current & 0xF0000000) >> 28;
7333 7334 hba->auth_cfg.dh_group_priority[1] =
7334 7335 (cfg->current & 0x0F000000) >> 24;
7335 7336 hba->auth_cfg.dh_group_priority[2] =
7336 7337 (cfg->current & 0x00F00000) >> 20;
7337 7338 hba->auth_cfg.dh_group_priority[3] =
7338 7339 (cfg->current & 0x000F0000) >> 16;
7339 7340 hba->auth_cfg.dh_group_priority[4] =
7340 7341 (cfg->current & 0x0000F000) >> 12;
7341 7342 hba->auth_cfg.dh_group_priority[5] =
7342 7343 (cfg->current & 0x00000F00) >> 8;
7343 7344 hba->auth_cfg.dh_group_priority[6] =
7344 7345 (cfg->current & 0x000000F0) >> 4;
7345 7346 hba->auth_cfg.dh_group_priority[7] =
7346 7347 (cfg->current & 0x0000000F);
7347 7348 break;
7348 7349
7349 7350 case CFG_AUTH_INTERVAL:
7350 7351 hba->auth_cfg.reauthenticate_time_interval = cfg->current;
7351 7352 break;
7352 7353 #endif /* DHCHAP_SUPPORT */
7353 7354
7354 7355 }
7355 7356
7356 7357 return (FC_SUCCESS);
7357 7358
7358 7359 } /* emlxs_set_parm() */
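
The CFG_AUTH_GROUP case above unrolls eight nibble extractions into dh_group_priority[0..7], most significant nibble first. The same decode can be written as a loop; a minimal userland sketch, with priority[] standing in for the hba->auth_cfg fields:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
            uint32_t current = 0x12345678;  /* packed DH group priorities */
            uint8_t priority[8];
            int i;

            /* priority[0] comes from bits 31:28, priority[7] from 3:0 */
            for (i = 0; i < 8; i++)
                    priority[i] = (current >> (28 - (i * 4))) & 0xF;

            for (i = 0; i < 8; i++)
                    (void) printf("dh_group_priority[%d] = %u\n",
                        i, priority[i]);
            return (0);
    }
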
7359 7360
7360 7361
7361 7362 /*
7362 7363 * emlxs_mem_alloc OS specific routine for memory allocation / mapping
7363 7364 *
7364 7365 * The buf_info->flags field describes the memory operation requested.
7365 7366 *
7366 7367 * FC_MBUF_PHYSONLY set requests a supplied virtual address be mapped for DMA
7367 7368 * Virtual address is supplied in buf_info->virt
7368 7369 * DMA mapping flag is in buf_info->align
7369 7370 * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE)
7370 7371 * The mapped physical address is returned buf_info->phys
7371 7372 *
7372 7373 * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and
7373 7374 * if FC_MBUF_DMA is set the memory is also mapped for DMA
7374 7375 * The byte alignment of the memory request is supplied in buf_info->align
7375 7376 * The byte size of the memory request is supplied in buf_info->size
7376 7377 * The virtual address is returned buf_info->virt
7377 7378 * The mapped physical address is returned buf_info->phys (for FC_MBUF_DMA)
7378 7379 */
7379 7380 extern uint8_t *
7380 7381 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
7381 7382 {
7382 7383 emlxs_port_t *port = &PPORT;
7383 7384 ddi_dma_attr_t dma_attr;
7384 7385 ddi_device_acc_attr_t dev_attr;
7385 7386 uint_t cookie_count;
7386 7387 size_t dma_reallen;
7387 7388 ddi_dma_cookie_t dma_cookie;
7388 7389 uint_t dma_flag;
7389 7390 int status;
7390 7391
7391 7392 dma_attr = hba->dma_attr_1sg;
7392 7393 dev_attr = emlxs_data_acc_attr;
7393 7394
7394 7395 if (buf_info->flags & FC_MBUF_SNGLSG) {
7395 7396 dma_attr.dma_attr_sgllen = 1;
7396 7397 }
7397 7398
7398 7399 if (buf_info->flags & FC_MBUF_DMA32) {
7399 7400 dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff;
7400 7401 }
7401 7402
7402 7403 if (buf_info->flags & FC_MBUF_PHYSONLY) {
7403 7404
7404 7405 if (buf_info->virt == NULL) {
7405 7406 goto done;
7406 7407 }
7407 7408
7408 7409 /*
7409 7410 * Allocate the DMA handle for this DMA object
7410 7411 */
7411 7412 status = ddi_dma_alloc_handle((void *)hba->dip,
7412 7413 &dma_attr, DDI_DMA_DONTWAIT,
7413 7414 NULL, (ddi_dma_handle_t *)&buf_info->dma_handle);
7414 7415 if (status != DDI_SUCCESS) {
7415 7416 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7416 7417 "ddi_dma_alloc_handle failed: size=%x align=%x "
7417 7418 "flags=%x", buf_info->size, buf_info->align,
7418 7419 buf_info->flags);
7419 7420
7420 7421 buf_info->phys = 0;
7421 7422 buf_info->dma_handle = 0;
7422 7423 goto done;
7423 7424 }
7424 7425
7425 7426 switch (buf_info->align) {
7426 7427 case DMA_READ_WRITE:
7427 7428 dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
7428 7429 break;
7429 7430 case DMA_READ_ONLY:
7430 7431 dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
7431 7432 break;
7432 7433 case DMA_WRITE_ONLY:
7433 7434 dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
7434 7435 break;
7435 7436 }
7436 7437
7437 7438 /* Map this page of memory */
7438 7439 status = ddi_dma_addr_bind_handle(
7439 7440 (ddi_dma_handle_t)buf_info->dma_handle, NULL,
7440 7441 (caddr_t)buf_info->virt, (size_t)buf_info->size,
7441 7442 dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
7442 7443 &cookie_count);
7443 7444
7444 7445 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
7445 7446 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7446 7447 "ddi_dma_addr_bind_handle failed: status=%x "
7447 7448 "count=%x flags=%x", status, cookie_count,
7448 7449 buf_info->flags);
7449 7450
7450 7451 (void) ddi_dma_free_handle(
7451 7452 (ddi_dma_handle_t *)&buf_info->dma_handle);
7452 7453 buf_info->phys = 0;
7453 7454 buf_info->dma_handle = 0;
7454 7455 goto done;
7455 7456 }
7456 7457
7457 7458 if (hba->bus_type == SBUS_FC) {
7458 7459
7459 7460 int32_t burstsizes_limit = 0xff;
7460 7461 int32_t ret_burst;
7461 7462
7462 7463 ret_burst = ddi_dma_burstsizes(
7463 7464 buf_info->dma_handle) & burstsizes_limit;
7464 7465 if (ddi_dma_set_sbus64(buf_info->dma_handle,
7465 7466 ret_burst) == DDI_FAILURE) {
7466 7467 EMLXS_MSGF(EMLXS_CONTEXT,
7467 7468 &emlxs_mem_alloc_failed_msg,
7468 7469 "ddi_dma_set_sbus64 failed.");
7469 7470 }
7470 7471 }
7471 7472
7472 7473 /* Save Physical address */
7473 7474 buf_info->phys = dma_cookie.dmac_laddress;
7474 7475
7475 7476 /*
7476 7477 * Just to be sure, let's add this
7477 7478 */
7478 7479 EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
7479 7480 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
7480 7481
7481 7482 } else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
7482 7483
7483 7484 dma_attr.dma_attr_align = buf_info->align;
7484 7485
7485 7486 /*
7486 7487 * Allocate the DMA handle for this DMA object
7487 7488 */
7488 7489 status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
7489 7490 DDI_DMA_DONTWAIT, NULL,
7490 7491 (ddi_dma_handle_t *)&buf_info->dma_handle);
7491 7492 if (status != DDI_SUCCESS) {
7492 7493 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7493 7494 "ddi_dma_alloc_handle failed: size=%x align=%x "
7494 7495 "flags=%x", buf_info->size, buf_info->align,
7495 7496 buf_info->flags);
7496 7497
7497 7498 buf_info->virt = NULL;
7498 7499 buf_info->phys = 0;
7499 7500 buf_info->data_handle = 0;
7500 7501 buf_info->dma_handle = 0;
7501 7502 goto done;
7502 7503 }
7503 7504
7504 7505 status = ddi_dma_mem_alloc(
7505 7506 (ddi_dma_handle_t)buf_info->dma_handle,
7506 7507 (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
7507 7508 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
7508 7509 &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);
7509 7510
7510 7511 if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
7511 7512 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7512 7513 "ddi_dma_mem_alloc failed: size=%x align=%x "
7513 7514 "flags=%x", buf_info->size, buf_info->align,
7514 7515 buf_info->flags);
7515 7516
7516 7517 (void) ddi_dma_free_handle(
7517 7518 (ddi_dma_handle_t *)&buf_info->dma_handle);
7518 7519
7519 7520 buf_info->virt = NULL;
7520 7521 buf_info->phys = 0;
7521 7522 buf_info->data_handle = 0;
7522 7523 buf_info->dma_handle = 0;
7523 7524 goto done;
7524 7525 }
7525 7526
7526 7527 /* Map this page of memory */
7527 7528 status = ddi_dma_addr_bind_handle(
7528 7529 (ddi_dma_handle_t)buf_info->dma_handle, NULL,
7529 7530 (caddr_t)buf_info->virt, (size_t)buf_info->size,
7530 7531 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
7531 7532 &dma_cookie, &cookie_count);
7532 7533
7533 7534 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
7534 7535 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7535 7536 "ddi_dma_addr_bind_handle failed: status=%x "
7536 7537 "count=%d size=%x align=%x flags=%x", status,
7537 7538 cookie_count, buf_info->size, buf_info->align,
7538 7539 buf_info->flags);
7539 7540
7540 7541 (void) ddi_dma_mem_free(
7541 7542 (ddi_acc_handle_t *)&buf_info->data_handle);
7542 7543 (void) ddi_dma_free_handle(
7543 7544 (ddi_dma_handle_t *)&buf_info->dma_handle);
7544 7545
7545 7546 buf_info->virt = NULL;
7546 7547 buf_info->phys = 0;
7547 7548 buf_info->dma_handle = 0;
7548 7549 buf_info->data_handle = 0;
7549 7550 goto done;
7550 7551 }
7551 7552
7552 7553 if (hba->bus_type == SBUS_FC) {
7553 7554 int32_t burstsizes_limit = 0xff;
7554 7555 int32_t ret_burst;
7555 7556
7556 7557 ret_burst =
7557 7558 ddi_dma_burstsizes(buf_info->
7558 7559 dma_handle) & burstsizes_limit;
7559 7560 if (ddi_dma_set_sbus64(buf_info->dma_handle,
7560 7561 ret_burst) == DDI_FAILURE) {
7561 7562 EMLXS_MSGF(EMLXS_CONTEXT,
7562 7563 &emlxs_mem_alloc_failed_msg,
7563 7564 "ddi_dma_set_sbus64 failed.");
7564 7565 }
7565 7566 }
7566 7567
7567 7568 /* Save Physical address */
7568 7569 buf_info->phys = dma_cookie.dmac_laddress;
7569 7570
7570 7571 /* Just to be sure, let's add this */
7571 7572 EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
7572 7573 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
7573 7574
7574 7575 } else { /* allocate virtual memory */
7575 7576
7576 7577 buf_info->virt =
7577 7578 kmem_zalloc((size_t)buf_info->size, KM_NOSLEEP);
7578 7579 buf_info->phys = 0;
7579 7580 buf_info->data_handle = 0;
7580 7581 buf_info->dma_handle = 0;
7581 7582
7582 7583 if (buf_info->virt == (uint32_t *)0) {
7583 7584 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
7584 7585 "size=%x flags=%x", buf_info->size,
7585 7586 buf_info->flags);
7586 7587 }
7587 7588
7588 7589 }
7589 7590
7590 7591 done:
7591 7592
7592 7593 return ((uint8_t *)buf_info->virt);
7593 7594
7594 7595 } /* emlxs_mem_alloc() */
7595 7596
7596 7597
7597 7598
7598 7599 /*
7599 7600 * emlxs_mem_free:
7600 7601 *
7601 7602 * OS specific routine for memory de-allocation / unmapping
7602 7603 *
7603 7604 * The buf_info->flags field describes the memory operation requested.
7604 7605 *
7605 7606 * FC_MBUF_PHYSONLY set requests a supplied virtual address be unmapped
7606 7607 * for DMA, but not freed. The mapped physical address to be unmapped is in
7607 7608 * buf_info->phys
7608 7609 *
7609 7610 * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only
7610 7611 * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in
7611 7612 * buf_info->phys. The virtual address to be freed is in buf_info->virt
7612 7613 */
7613 7614 /*ARGSUSED*/
7614 7615 extern void
7615 7616 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
7616 7617 {
7617 7618 if (buf_info->flags & FC_MBUF_PHYSONLY) {
7618 7619
7619 7620 if (buf_info->dma_handle) {
7620 7621 (void) ddi_dma_unbind_handle(buf_info->dma_handle);
7621 7622 (void) ddi_dma_free_handle(
7622 7623 (ddi_dma_handle_t *)&buf_info->dma_handle);
7623 7624 buf_info->dma_handle = NULL;
7624 7625 }
7625 7626
7626 7627 } else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
7627 7628
7628 7629 if (buf_info->dma_handle) {
7629 7630 (void) ddi_dma_unbind_handle(buf_info->dma_handle);
7630 7631 (void) ddi_dma_mem_free(
7631 7632 (ddi_acc_handle_t *)&buf_info->data_handle);
7632 7633 (void) ddi_dma_free_handle(
7633 7634 (ddi_dma_handle_t *)&buf_info->dma_handle);
7634 7635 buf_info->dma_handle = NULL;
7635 7636 buf_info->data_handle = NULL;
7636 7637 }
7637 7638
7638 7639 	} else { /* free virtual memory */
7639 7640
7640 7641 if (buf_info->virt) {
7641 7642 kmem_free(buf_info->virt, (size_t)buf_info->size);
7642 7643 buf_info->virt = NULL;
7643 7644 }
7644 7645 }
7645 7646
7646 7647 } /* emlxs_mem_free() */
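
Taken together, the two routines above give the usual allocate/map/use/free pattern for a DMA-able buffer. The fragment below is a hedged sketch of a caller, using only the MBUF_INFO fields documented above; the 4096-byte size and alignment are arbitrary illustration values, and error handling is reduced to the NULL check the allocator guarantees on failure.

    MBUF_INFO buf_info;

    bzero(&buf_info, sizeof (MBUF_INFO));
    buf_info.size = 4096;           /* illustrative size */
    buf_info.align = 4096;          /* illustrative alignment */
    buf_info.flags = FC_MBUF_DMA;   /* allocate and map for DMA */

    (void) emlxs_mem_alloc(hba, &buf_info);
    if (buf_info.virt == NULL) {
            /* Allocation or DMA binding failed; handles were zeroed */
            return (FC_FAILURE);
    }

    /* ... use buf_info.virt (kernel address) and buf_info.phys ... */

    /* Free with the same flags/size; unbinds and releases the handles */
    emlxs_mem_free(hba, &buf_info);
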
7647 7648
7648 7649
7649 7650 static int
7650 7651 emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset)
7651 7652 {
7652 7653 int channel;
7653 7654 int msi_id;
7654 7655
7655 7656
7656 7657 	/* IO to an FCP2 device or a device reset always uses the fcp channel */
7657 7658 if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) {
7658 7659 return (hba->channel_fcp);
7659 7660 }
7660 7661
7661 7662
7662 7663 msi_id = emlxs_select_msiid(hba);
7663 7664 channel = emlxs_msiid_to_chan(hba, msi_id);
7664 7665
7665 7666
7666 7667
7667 7668 /* If channel is closed, then try fcp channel */
7668 7669 if (ndlp->nlp_flag[channel] & NLP_CLOSED) {
7669 7670 channel = hba->channel_fcp;
7670 7671 }
7671 7672 return (channel);
7672 7673
7673 7674 }
7674 7675
7675 7676 static int32_t
7676 7677 emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp)
7677 7678 {
7678 7679 emlxs_hba_t *hba = HBA;
7679 7680 fc_packet_t *pkt;
7680 7681 emlxs_config_t *cfg;
7681 7682 MAILBOXQ *mbq;
7682 7683 MAILBOX *mb;
7683 7684 uint32_t rc;
7684 7685
7685 7686 /*
7686 7687 	 * This routine provides an alternative target reset processing
7687 7688 * method. Instead of sending an actual target reset to the
7688 7689 * NPort, we will first unreg the login to that NPort. This
7689 7690 	 * will cause all the outstanding IOs to quickly complete with
7690 7691 * a NO RPI local error. Next we will force the ULP to relogin
7691 7692 * to the NPort by sending an RSCN (for that NPort) to the
7692 7693 * upper layer. This method should result in a fast target
7693 7694 * reset, as far as IOs completing; however, since an actual
7694 7695 * target reset is not sent to the NPort, it is not 100%
7695 7696 	 * compatible. Things like reservations will not be broken.
7696 7697 	 * By default this option is DISABLED, and it is only enabled through
7697 7698 * a hidden configuration parameter (fast-tgt-reset).
7698 7699 */
7699 7700 rc = FC_TRAN_BUSY;
7700 7701 pkt = PRIV2PKT(sbp);
7701 7702 cfg = &CFG;
7702 7703
7703 7704 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
7704 7705 /* issue the mbox cmd to the sli */
7705 7706 mb = (MAILBOX *) mbq->mbox;
7706 7707 bzero((void *) mb, MAILBOX_CMD_BSIZE);
7707 7708 mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi;
7708 7709 #ifdef SLI3_SUPPORT
7709 7710 mb->un.varUnregLogin.vpi = port->vpi;
7710 7711 #endif /* SLI3_SUPPORT */
7711 7712 mb->mbxCommand = MBX_UNREG_LOGIN;
7712 7713 mb->mbxOwner = OWN_HOST;
7713 7714
7714 7715 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7715 7716 "Fast Target Reset: unreg rpi=x%x tmr %d", ndlp->nlp_Rpi,
7716 7717 cfg[CFG_FAST_TGT_RESET_TMR].current);
7717 7718
7718 7719 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
7719 7720 == MBX_SUCCESS) {
7720 7721
7721 7722 ndlp->nlp_Rpi = 0;
7722 7723
7723 7724 mutex_enter(&sbp->mtx);
7724 7725 sbp->node = (void *)ndlp;
7725 7726 sbp->did = ndlp->nlp_DID;
7726 7727 mutex_exit(&sbp->mtx);
7727 7728
7728 7729 if (pkt->pkt_rsplen) {
7729 7730 bzero((uint8_t *)pkt->pkt_resp,
7730 7731 pkt->pkt_rsplen);
7731 7732 }
7732 7733 if (cfg[CFG_FAST_TGT_RESET_TMR].current) {
7733 7734 ndlp->nlp_force_rscn = hba->timer_tics +
7734 7735 cfg[CFG_FAST_TGT_RESET_TMR].current;
7735 7736 }
7736 7737
7737 7738 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0);
7738 7739 }
7739 7740
7740 7741 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
7741 7742 rc = FC_SUCCESS;
7742 7743 }
7743 7744 return (rc);
7744 7745 }
7745 7746
7746 7747 static int32_t
7747 7748 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp, uint32_t *pkt_flags)
7748 7749 {
7749 7750 emlxs_hba_t *hba = HBA;
7750 7751 fc_packet_t *pkt;
7751 7752 emlxs_config_t *cfg;
7752 7753 IOCBQ *iocbq;
7753 7754 IOCB *iocb;
7754 7755 CHANNEL *cp;
7755 7756 NODELIST *ndlp;
7756 7757 char *cmd;
7757 7758 uint16_t lun;
7758 7759 FCP_CMND *fcp_cmd;
7759 7760 uint32_t did;
7760 7761 uint32_t reset = 0;
7761 7762 int channel;
7762 7763 int32_t rval;
7763 7764
7764 7765 pkt = PRIV2PKT(sbp);
7765 7766 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
7766 7767
7767 7768 /* Find target node object */
7768 7769 ndlp = emlxs_node_find_did(port, did);
7769 7770
7770 7771 if (!ndlp || !ndlp->nlp_active) {
7771 7772 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
7772 7773 "Node not found. did=%x", did);
7773 7774
7774 7775 return (FC_BADPACKET);
7775 7776 }
7776 7777
7777 7778 /* When the fcp channel is closed we stop accepting any FCP cmd */
7778 7779 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
7779 7780 return (FC_TRAN_BUSY);
7780 7781 }
7781 7782
7782 7783 /* Snoop for target or lun reset first */
7783 7784 /* We always use FCP channel to send out target/lun reset fcp cmds */
7784 7785 	/* interrupt affinity only applies to non target/lun reset fcp cmds */
7785 7786
7786 7787 cmd = (char *)pkt->pkt_cmd;
7787 7788 lun = *((uint16_t *)cmd);
7788 7789 lun = LE_SWAP16(lun);
7789 7790
7790 7791 iocbq = &sbp->iocbq;
7791 7792 iocb = &iocbq->iocb;
7792 7793 iocbq->node = (void *) ndlp;
7793 7794
7794 7795 /* Check for target reset */
7795 7796 if (cmd[10] & 0x20) {
7796 7797 /* prepare iocb */
7797 7798 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7798 7799 hba->channel_fcp)) != FC_SUCCESS) {
7799 7800
7800 7801 if (rval == 0xff) {
7801 7802 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7802 7803 0, 1);
7803 7804 rval = FC_SUCCESS;
7804 7805 }
7805 7806
7806 7807 return (rval);
7807 7808 }
7808 7809
7809 7810 mutex_enter(&sbp->mtx);
7810 7811 sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
7811 7812 sbp->pkt_flags |= PACKET_POLLED;
7812 7813 *pkt_flags = sbp->pkt_flags;
7813 7814 mutex_exit(&sbp->mtx);
7814 7815
7815 7816 #ifdef SAN_DIAG_SUPPORT
7816 7817 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET,
7817 7818 (HBA_WWN *)&ndlp->nlp_portname, -1);
7818 7819 #endif /* SAN_DIAG_SUPPORT */
7819 7820
7820 7821 iocbq->flag |= IOCB_PRIORITY;
7821 7822
7822 7823 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7823 7824 "Target Reset: did=%x", did);
7824 7825
7825 7826 cfg = &CFG;
7826 7827 if (cfg[CFG_FAST_TGT_RESET].current) {
7827 7828 if (emlxs_fast_target_reset(port, sbp, ndlp) ==
7828 7829 FC_SUCCESS) {
7829 7830 return (FC_SUCCESS);
7830 7831 }
7831 7832 }
7832 7833
7833 7834 /* Close the node for any further normal IO */
7834 7835 emlxs_node_close(port, ndlp, hba->channel_fcp,
7835 7836 pkt->pkt_timeout);
7836 7837
7837 7838 /* Flush the IO's on the tx queues */
7838 7839 (void) emlxs_tx_node_flush(port, ndlp,
7839 7840 &hba->chan[hba->channel_fcp], 0, sbp);
7840 7841
7841 7842 /* This is the target reset fcp cmd */
7842 7843 reset = 1;
7843 7844 }
7844 7845
7845 7846 /* Check for lun reset */
7846 7847 else if (cmd[10] & 0x10) {
7847 7848 /* prepare iocb */
7848 7849 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7849 7850 hba->channel_fcp)) != FC_SUCCESS) {
7850 7851
7851 7852 if (rval == 0xff) {
7852 7853 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7853 7854 0, 1);
7854 7855 rval = FC_SUCCESS;
7855 7856 }
7856 7857
7857 7858 return (rval);
7858 7859 }
7859 7860
7860 7861 mutex_enter(&sbp->mtx);
7861 7862 sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
7862 7863 sbp->pkt_flags |= PACKET_POLLED;
7863 7864 *pkt_flags = sbp->pkt_flags;
7864 7865 mutex_exit(&sbp->mtx);
7865 7866
7866 7867 #ifdef SAN_DIAG_SUPPORT
7867 7868 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET,
7868 7869 (HBA_WWN *)&ndlp->nlp_portname, lun);
7869 7870 #endif /* SAN_DIAG_SUPPORT */
7870 7871
7871 7872 iocbq->flag |= IOCB_PRIORITY;
7872 7873
7873 7874 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
7874 7875 "LUN Reset: did=%x lun=%02x LUN=%02x02x", did, lun,
7875 7876 cmd[0], cmd[1]);
7876 7877
7877 7878 /* Flush the IO's on the tx queues for this lun */
7878 7879 (void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);
7879 7880
7880 7881 /* This is the lun reset fcp cmd */
7881 7882 reset = 1;
7882 7883 }
7883 7884
7884 7885 channel = emlxs_select_fcp_channel(hba, ndlp, reset);
7885 7886
7886 7887 #ifdef SAN_DIAG_SUPPORT
7887 7888 sbp->sd_start_time = gethrtime();
7888 7889 #endif /* SAN_DIAG_SUPPORT */
7889 7890
7890 7891 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
7891 7892 emlxs_swap_fcp_pkt(sbp);
7892 7893 #endif /* EMLXS_MODREV2X */
7893 7894
7894 7895 fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;
7895 7896
7896 7897 if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
7897 7898 fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
7898 7899 }
7899 7900
7900 7901 if (reset == 0) {
7901 7902 /*
7902 7903 		 * The target/lun reset fcp cmd has already been
7903 7904 		 * prepared separately at the top of this routine
7904 7905 */
7905 7906 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
7906 7907 channel)) != FC_SUCCESS) {
7907 7908
7908 7909 if (rval == 0xff) {
7909 7910 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
7910 7911 0, 1);
7911 7912 rval = FC_SUCCESS;
7912 7913 }
7913 7914
7914 7915 return (rval);
7915 7916 }
7916 7917 }
7917 7918
7918 7919 cp = &hba->chan[channel];
7919 7920 cp->ulpSendCmd++;
7920 7921
7921 7922 	/* Initialize sbp */
7922 7923 mutex_enter(&sbp->mtx);
7923 7924 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
7924 7925 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
7925 7926 sbp->node = (void *)ndlp;
7926 7927 sbp->lun = lun;
7927 7928 sbp->class = iocb->ULPCLASS;
7928 7929 sbp->did = ndlp->nlp_DID;
7929 7930 mutex_exit(&sbp->mtx);
7930 7931
7931 7932 if (pkt->pkt_cmdlen) {
7932 7933 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
7933 7934 DDI_DMA_SYNC_FORDEV);
7934 7935 }
7935 7936
7936 7937 if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
7937 7938 EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
7938 7939 DDI_DMA_SYNC_FORDEV);
7939 7940 }
7940 7941
7941 7942 HBASTATS.FcpIssued++;
7942 7943
7943 7944 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
7944 7945 return (FC_SUCCESS);
7945 7946
7946 7947 } /* emlxs_send_fcp_cmd() */
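
The routine above snoops the FCP_CMND payload directly: byte 10 carries the task-management flags it cares about (0x20 selects a target reset, 0x10 a LUN reset), and the LUN index comes from the first 16-bit word, byte-swapped to host order by LE_SWAP16. A minimal userland sketch of the flag snoop over a raw payload buffer; the payload contents are made up for illustration:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /*
     * Classify an FCP_CMND payload the way emlxs_send_fcp_cmd() does:
     * byte 10 holds the task-management flags it snoops.
     */
    static const char *
    classify(const uint8_t *cmd)
    {
            if (cmd[10] & 0x20)
                    return ("target reset");
            if (cmd[10] & 0x10)
                    return ("lun reset");
            return ("normal FCP command");
    }

    int
    main(void)
    {
            uint8_t cmd[32];

            (void) memset(cmd, 0, sizeof (cmd));
            (void) printf("%s\n", classify(cmd));   /* normal FCP command */

            cmd[10] = 0x20;
            (void) printf("%s\n", classify(cmd));   /* target reset */

            cmd[10] = 0x10;
            (void) printf("%s\n", classify(cmd));   /* lun reset */
            return (0);
    }
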
7947 7948
7948 7949
7949 7950
7950 7951
7951 7952 /*
7952 7953 * We have to consider this setup works for INTX, MSI, and MSIX
7953 7954 * For INTX, intr_count is always 1
7954 7955 * For MSI, intr_count is always 2 by default
7955 7956 * For MSIX, intr_count is configurable (1, 2, 4, 8) for now.
7956 7957 */
7957 7958 extern int
7958 7959 emlxs_select_msiid(emlxs_hba_t *hba)
7959 7960 {
7960 7961 int msiid = 0;
7961 7962
7962 7963 /* We use round-robin */
7963 7964 mutex_enter(&EMLXS_MSIID_LOCK);
7964 7965 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
7965 7966 msiid = hba->last_msiid;
7966 7967 hba->last_msiid ++;
7967 7968 if (hba->last_msiid >= hba->intr_count) {
7968 7969 hba->last_msiid = 0;
7969 7970 }
7970 7971 } else {
7971 7972 /* This should work for INTX and MSI also */
7972 7973 /* For SLI3 the chan_count is always 4 */
7973 7974 /* For SLI3 the msiid is limited to chan_count */
7974 7975 msiid = hba->last_msiid;
7975 7976 hba->last_msiid ++;
7976 7977 if (hba->intr_count > hba->chan_count) {
7977 7978 if (hba->last_msiid >= hba->chan_count) {
7978 7979 hba->last_msiid = 0;
7979 7980 }
7980 7981 } else {
7981 7982 if (hba->last_msiid >= hba->intr_count) {
7982 7983 hba->last_msiid = 0;
7983 7984 }
7984 7985 }
7985 7986 }
7986 7987 mutex_exit(&EMLXS_MSIID_LOCK);
7987 7988
7988 7989 return (msiid);
7989 7990 } /* emlxs_select_msiid */
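
The selection above is a plain round-robin across the available interrupt vectors, additionally capped at chan_count on SLI3. A minimal userland sketch of the SLI4 branch, assuming four MSI-X vectors (intr_count = 4) and omitting the EMLXS_MSIID_LOCK serialization:

    #include <stdio.h>

    static int last_msiid;          /* hba->last_msiid */
    static int intr_count = 4;      /* illustrative MSI-X vector count */

    /* Round-robin vector selection, as in the SLI4 branch above */
    static int
    select_msiid(void)
    {
            int msiid = last_msiid;

            if (++last_msiid >= intr_count)
                    last_msiid = 0;
            return (msiid);
    }

    int
    main(void)
    {
            int i;

            for (i = 0; i < 6; i++)
                    (void) printf("%d ", select_msiid());
            (void) printf("\n");    /* prints: 0 1 2 3 0 1 */
            return (0);
    }
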
7990 7991
7991 7992
7992 7993 /*
7993 7994  * A channel has an association with an msi id.
7994 7995 * One msi id could be associated with multiple channels.
7995 7996 */
7996 7997 extern int
7997 7998 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id)
7998 7999 {
7999 8000 emlxs_config_t *cfg = &CFG;
8000 8001 EQ_DESC_t *eqp;
8001 8002 int chan;
8002 8003 int num_wq;
8003 8004
8004 8005 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8005 8006 /* For SLI4 round robin all WQs associated with the msi_id */
8006 8007 eqp = &hba->sli.sli4.eq[msi_id];
8007 8008
8008 8009 mutex_enter(&eqp->lastwq_lock);
8009 8010 chan = eqp->lastwq;
8010 8011 eqp->lastwq++;
8011 8012 num_wq = cfg[CFG_NUM_WQ].current;
8012 8013 if (eqp->lastwq >= ((msi_id + 1) * num_wq)) {
8013 8014 eqp->lastwq -= num_wq;
8014 8015 }
8015 8016 mutex_exit(&eqp->lastwq_lock);
8016 8017
8017 8018 return (chan);
8018 8019 } else {
8019 8020 /* This is for SLI3 mode */
8020 8021 return (hba->msi2chan[msi_id]);
8021 8022 }
8022 8023
8023 8024 } /* emlxs_msiid_to_chan */
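
For SLI4, each EQ (one per MSI id) owns a contiguous window of num_wq work-queue channels, and lastwq round-robins inside that window, i.e. channels msi_id * num_wq through (msi_id + 1) * num_wq - 1. A minimal userland sketch of that windowed round-robin; num_wq = 2 and the EQ count are illustrative values, the per-EQ lastwq_lock is omitted, and each lastwq is assumed to start at the base of its window:

    #include <stdio.h>

    #define NUM_WQ  2       /* illustrative cfg[CFG_NUM_WQ].current */
    #define NUM_EQ  4       /* illustrative number of EQs / MSI vectors */

    static int lastwq[NUM_EQ];

    /* Map an MSI id to a WQ channel, as in the SLI4 branch above */
    static int
    msiid_to_chan(int msi_id)
    {
            int chan = lastwq[msi_id];

            lastwq[msi_id]++;
            if (lastwq[msi_id] >= ((msi_id + 1) * NUM_WQ))
                    lastwq[msi_id] -= NUM_WQ;
            return (chan);
    }

    int
    main(void)
    {
            int i, eq;

            /* Each EQ starts at the base of its own window (assumed) */
            for (eq = 0; eq < NUM_EQ; eq++)
                    lastwq[eq] = eq * NUM_WQ;

            for (i = 0; i < 4; i++)
                    (void) printf("%d ", msiid_to_chan(1));
            (void) printf("\n");    /* prints: 2 3 2 3 */
            return (0);
    }
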
8024 8025
8025 8026
8026 8027 #ifdef SFCT_SUPPORT
8027 8028 static int32_t
8028 8029 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp)
8029 8030 {
8030 8031 emlxs_hba_t *hba = HBA;
8031 8032 fc_packet_t *pkt;
8032 8033 IOCBQ *iocbq;
8033 8034 IOCB *iocb;
8034 8035 NODELIST *ndlp;
8035 8036 CHANNEL *cp;
8036 8037 uint16_t iotag;
8037 8038 uint32_t did;
8038 8039 ddi_dma_cookie_t *cp_cmd;
8039 8040
8040 8041 pkt = PRIV2PKT(sbp);
8041 8042
8042 8043 did = sbp->did;
8043 8044 ndlp = sbp->node;
8044 8045
8045 8046 iocbq = &sbp->iocbq;
8046 8047 iocb = &iocbq->iocb;
8047 8048
8048 8049 /* Make sure node is still active */
8049 8050 if (!ndlp->nlp_active) {
8050 8051 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8051 8052 "*Node not found. did=%x", did);
8052 8053
8053 8054 return (FC_BADPACKET);
8054 8055 }
8055 8056
8056 8057 /* If gate is closed */
8057 8058 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8058 8059 return (FC_TRAN_BUSY);
8059 8060 }
8060 8061
8061 8062 /* Get the iotag by registering the packet */
8062 8063 iotag = emlxs_register_pkt(sbp->channel, sbp);
8063 8064
8064 8065 if (!iotag) {
8065 8066 /* No more command slots available, retry later */
8066 8067 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8067 8068 "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
8068 8069
8069 8070 return (FC_TRAN_BUSY);
8070 8071 }
8071 8072
8072 8073 /* Point of no return */
8073 8074
8074 8075 cp = sbp->channel;
8075 8076 cp->ulpSendCmd++;
8076 8077
8077 8078 #if (EMLXS_MODREV >= EMLXS_MODREV3)
8078 8079 cp_cmd = pkt->pkt_cmd_cookie;
8079 8080 #else
8080 8081 cp_cmd = &pkt->pkt_cmd_cookie;
8081 8082 #endif /* >= EMLXS_MODREV3 */
8082 8083
8083 8084 iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
8084 8085 iocb->un.fcpt64.bdl.addrLow = PADDR_LO(cp_cmd->dmac_laddress);
8085 8086 iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
8086 8087 iocb->un.fcpt64.bdl.bdeFlags = 0;
8087 8088
8088 8089 if (hba->sli_mode < 3) {
8089 8090 iocb->ULPBDECOUNT = 1;
8090 8091 iocb->ULPLE = 1;
8091 8092 } else { /* SLI3 */
8092 8093
8093 8094 iocb->ULPBDECOUNT = 0;
8094 8095 iocb->ULPLE = 0;
8095 8096 iocb->unsli3.ext_iocb.ebde_count = 0;
8096 8097 }
8097 8098
8098 8099 	/* Initialize iocbq */
8099 8100 iocbq->port = (void *)port;
8100 8101 iocbq->node = (void *)ndlp;
8101 8102 iocbq->channel = (void *)cp;
8102 8103
8103 8104 	/* Initialize iocb */
8104 8105 iocb->ULPCONTEXT = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
8105 8106 iocb->ULPIOTAG = iotag;
8106 8107 iocb->ULPRSVDBYTE =
8107 8108 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8108 8109 iocb->ULPOWNER = OWN_CHIP;
8109 8110 iocb->ULPCLASS = sbp->class;
8110 8111 iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
8111 8112
8112 8113 /* Set the pkt timer */
8113 8114 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8114 8115 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8115 8116
8116 8117 if (pkt->pkt_cmdlen) {
8117 8118 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8118 8119 DDI_DMA_SYNC_FORDEV);
8119 8120 }
8120 8121
8121 8122 HBASTATS.FcpIssued++;
8122 8123
8123 8124 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8124 8125
8125 8126 return (FC_SUCCESS);
8126 8127
8127 8128 } /* emlxs_send_fct_status() */
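
Two deadlines are derived from pkt_timeout above: ULPRSVDBYTE only has room for an 8-bit timeout, so anything over 0xff is passed to the adapter as 0 (no adapter-side timeout), while the driver-side deadline in sbp->ticks is padded by 10 timer ticks only when an adapter timeout was actually programmed, presumably so the adapter can time out first. That last rationale is an inference, not stated in the source. A minimal userland sketch of the split, treating timer_tics as a plain tick counter:

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Split a packet timeout the way the IOCB setup above does:
     * an 8-bit adapter timeout (0 means none) and a driver-side
     * deadline in timer ticks.
     */
    static void
    split_timeout(uint32_t timer_tics, uint32_t pkt_timeout,
        uint8_t *rsvdbyte, uint32_t *ticks)
    {
            *rsvdbyte = (pkt_timeout > 0xff) ? 0 : (uint8_t)pkt_timeout;
            *ticks = timer_tics + pkt_timeout +
                ((pkt_timeout > 0xff) ? 0 : 10);
    }

    int
    main(void)
    {
            uint8_t rsvd;
            uint32_t ticks;

            split_timeout(1000, 30, &rsvd, &ticks);
            (void) printf("timeout=30:  rsvd=%u ticks=%u\n", rsvd, ticks);
            /* rsvd=30, ticks=1040 */

            split_timeout(1000, 600, &rsvd, &ticks);
            (void) printf("timeout=600: rsvd=%u ticks=%u\n", rsvd, ticks);
            /* rsvd=0 (no adapter timeout), ticks=1600 */
            return (0);
    }
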
8128 8129
8129 8130
8130 8131 static int32_t
8131 8132 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
8132 8133 {
8133 8134 emlxs_hba_t *hba = HBA;
8134 8135 fc_packet_t *pkt;
8135 8136 IOCBQ *iocbq;
8136 8137 IOCB *iocb;
8137 8138 NODELIST *ndlp;
8138 8139 uint16_t iotag;
8139 8140 uint32_t did;
8140 8141
8141 8142 pkt = PRIV2PKT(sbp);
8142 8143
8143 8144 did = sbp->did;
8144 8145 ndlp = sbp->node;
8145 8146
8146 8147
8147 8148 iocbq = &sbp->iocbq;
8148 8149 iocb = &iocbq->iocb;
8149 8150
8150 8151 /* Make sure node is still active */
8151 8152 if ((ndlp == NULL) || (!ndlp->nlp_active)) {
8152 8153 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8153 8154 "*Node not found. did=%x", did);
8154 8155
8155 8156 return (FC_BADPACKET);
8156 8157 }
8157 8158
8158 8159 /* If gate is closed */
8159 8160 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8160 8161 return (FC_TRAN_BUSY);
8161 8162 }
8162 8163
8163 8164 /* Get the iotag by registering the packet */
8164 8165 iotag = emlxs_register_pkt(sbp->channel, sbp);
8165 8166
8166 8167 if (!iotag) {
8167 8168 /* No more command slots available, retry later */
8168 8169 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8169 8170 "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);
8170 8171
8171 8172 return (FC_TRAN_BUSY);
8172 8173 }
8173 8174
8174 8175 /* Point of no return */
8175 8176 iocbq->port = (void *)port;
8176 8177 iocbq->node = (void *)ndlp;
8177 8178 iocbq->channel = (void *)sbp->channel;
8178 8179 ((CHANNEL *)sbp->channel)->ulpSendCmd++;
8179 8180
8180 8181 /*
8181 8182 * Don't give the abort priority, we want the IOCB
8182 8183 * we are aborting to be processed first.
8183 8184 */
8184 8185 iocbq->flag |= IOCB_SPECIAL;
8185 8186
8186 8187 iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
8187 8188 iocb->ULPIOTAG = iotag;
8188 8189 iocb->ULPLE = 1;
8189 8190 iocb->ULPCLASS = sbp->class;
8190 8191 iocb->ULPOWNER = OWN_CHIP;
8191 8192
8192 8193 if (hba->state >= FC_LINK_UP) {
8193 8194 /* Create the abort IOCB */
8194 8195 iocb->un.acxri.abortType = ABORT_TYPE_ABTS;
8195 8196 iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
8196 8197
8197 8198 } else {
8198 8199 /* Create the close IOCB */
8199 8200 iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
8200 8201
8201 8202 }
8202 8203
8203 8204 iocb->ULPRSVDBYTE =
8204 8205 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
8205 8206 /* Set the pkt timer */
8206 8207 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8207 8208 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8208 8209
8209 8210 EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq);
8210 8211
8211 8212 return (FC_SUCCESS);
8212 8213
8213 8214 } /* emlxs_send_fct_abort() */
8214 8215
8215 8216 #endif /* SFCT_SUPPORT */
8216 8217
8217 8218
8218 8219 static int32_t
8219 8220 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
8220 8221 {
8221 8222 emlxs_hba_t *hba = HBA;
8222 8223 fc_packet_t *pkt;
8223 8224 IOCBQ *iocbq;
8224 8225 IOCB *iocb;
8225 8226 CHANNEL *cp;
8226 8227 uint32_t i;
8227 8228 NODELIST *ndlp;
8228 8229 uint32_t did;
8229 8230 int32_t rval;
8230 8231
8231 8232 pkt = PRIV2PKT(sbp);
8232 8233 cp = &hba->chan[hba->channel_ip];
8233 8234 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8234 8235
8235 8236 /* Check if node exists */
8236 8237 /* Broadcast did is always a success */
8237 8238 ndlp = emlxs_node_find_did(port, did);
8238 8239
8239 8240 if (!ndlp || !ndlp->nlp_active) {
8240 8241 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8241 8242 "Node not found. did=0x%x", did);
8242 8243
8243 8244 return (FC_BADPACKET);
8244 8245 }
8245 8246
8246 8247 /* Check if gate is temporarily closed */
8247 8248 if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) {
8248 8249 return (FC_TRAN_BUSY);
8249 8250 }
8250 8251
8251 8252 /* Check if an exchange has been created */
8252 8253 if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) {
8253 8254 /* No exchange. Try creating one */
8254 8255 (void) emlxs_create_xri(port, cp, ndlp);
8255 8256
8256 8257 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8257 8258 "Adapter Busy. Exchange not found. did=0x%x", did);
8258 8259
8259 8260 return (FC_TRAN_BUSY);
8260 8261 }
8261 8262
8262 8263 /* ULP PATCH: pkt_cmdlen was found to be set to zero */
8263 8264 /* on BROADCAST commands */
8264 8265 if (pkt->pkt_cmdlen == 0) {
8265 8266 /* Set the pkt_cmdlen to the cookie size */
8266 8267 #if (EMLXS_MODREV >= EMLXS_MODREV3)
8267 8268 for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
8268 8269 pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
8269 8270 }
8270 8271 #else
8271 8272 pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
8272 8273 #endif /* >= EMLXS_MODREV3 */
8273 8274
8274 8275 }
8275 8276
8276 8277 iocbq = &sbp->iocbq;
8277 8278 iocb = &iocbq->iocb;
8278 8279
8279 8280 iocbq->node = (void *)ndlp;
8280 8281 if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) {
8281 8282
8282 8283 if (rval == 0xff) {
8283 8284 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8284 8285 rval = FC_SUCCESS;
8285 8286 }
8286 8287
8287 8288 return (rval);
8288 8289 }
8289 8290
8290 8291 cp->ulpSendCmd++;
8291 8292
8292 8293 	/* Initialize sbp */
8293 8294 mutex_enter(&sbp->mtx);
8294 8295 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8295 8296 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8296 8297 sbp->node = (void *)ndlp;
8297 8298 sbp->lun = EMLXS_LUN_NONE;
8298 8299 sbp->class = iocb->ULPCLASS;
8299 8300 sbp->did = did;
8300 8301 mutex_exit(&sbp->mtx);
8301 8302
8302 8303 if (pkt->pkt_cmdlen) {
8303 8304 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8304 8305 DDI_DMA_SYNC_FORDEV);
8305 8306 }
8306 8307
8307 8308 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8308 8309
8309 8310 return (FC_SUCCESS);
8310 8311
8311 8312 } /* emlxs_send_ip() */
8312 8313
8313 8314
8314 8315 static int32_t
8315 8316 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
8316 8317 {
8317 8318 emlxs_hba_t *hba = HBA;
8318 8319 emlxs_port_t *vport;
8319 8320 fc_packet_t *pkt;
8320 8321 IOCBQ *iocbq;
8321 8322 CHANNEL *cp;
8322 8323 uint32_t cmd;
8323 8324 int i;
8324 8325 ELS_PKT *els_pkt;
8325 8326 NODELIST *ndlp;
8326 8327 uint32_t did;
8327 8328 char fcsp_msg[32];
8328 8329 int rc;
8329 8330 int32_t rval;
8330 8331 emlxs_config_t *cfg = &CFG;
8331 8332
8332 8333 fcsp_msg[0] = 0;
8333 8334 pkt = PRIV2PKT(sbp);
8334 8335 els_pkt = (ELS_PKT *)pkt->pkt_cmd;
8335 8336 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8336 8337
8337 8338 iocbq = &sbp->iocbq;
8338 8339
8339 8340 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8340 8341 emlxs_swap_els_pkt(sbp);
8341 8342 #endif /* EMLXS_MODREV2X */
8342 8343
8343 8344 cmd = *((uint32_t *)pkt->pkt_cmd);
8344 8345 cmd &= ELS_CMD_MASK;
8345 8346
8346 8347 /* Point of no return, except for ADISC & PLOGI */
8347 8348
8348 8349 /* Check node */
8349 8350 switch (cmd) {
8350 8351 case ELS_CMD_FLOGI:
8351 8352 case ELS_CMD_FDISC:
8352 8353 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8353 8354
8354 8355 if (emlxs_vpi_logi_notify(port, sbp)) {
8355 8356 pkt->pkt_state = FC_PKT_LOCAL_RJT;
8356 8357 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8357 8358 emlxs_unswap_pkt(sbp);
8358 8359 #endif /* EMLXS_MODREV2X */
8359 8360 return (FC_FAILURE);
8360 8361 }
8361 8362 } else {
8362 8363 /*
8363 8364 * If FLOGI is already complete, then we
8364 8365 * should not be receiving another FLOGI.
8365 8366 * Reset the link to recover.
8366 8367 */
8367 8368 if (port->flag & EMLXS_PORT_FLOGI_CMPL) {
8368 8369 pkt->pkt_state = FC_PKT_LOCAL_RJT;
8369 8370 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8370 8371 emlxs_unswap_pkt(sbp);
8371 8372 #endif /* EMLXS_MODREV2X */
8372 8373
8373 8374 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
8374 8375 return (FC_FAILURE);
8375 8376 }
8376 8377
8377 8378 if (port->vpi > 0) {
8378 8379 *((uint32_t *)pkt->pkt_cmd) = ELS_CMD_FDISC;
8379 8380 }
8380 8381 }
8381 8382
8382 8383 /* Command may have been changed */
8383 8384 cmd = *((uint32_t *)pkt->pkt_cmd);
8384 8385 cmd &= ELS_CMD_MASK;
8385 8386
8386 8387 if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
8387 8388 sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
8388 8389 }
8389 8390
8390 8391 ndlp = NULL;
8391 8392
8392 8393 /* We will process these cmds at the bottom of this routine */
8393 8394 break;
8394 8395
8395 8396 case ELS_CMD_PLOGI:
8396 8397 /* Make sure we don't log into ourself */
8397 8398 for (i = 0; i < MAX_VPORTS; i++) {
8398 8399 vport = &VPORT(i);
8399 8400
8400 8401 if (!(vport->flag & EMLXS_PORT_BOUND)) {
8401 8402 continue;
8402 8403 }
8403 8404
8404 8405 if (did == vport->did) {
8405 8406 pkt->pkt_state = FC_PKT_NPORT_RJT;
8406 8407
8407 8408 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8408 8409 emlxs_unswap_pkt(sbp);
8409 8410 #endif /* EMLXS_MODREV2X */
8410 8411
8411 8412 return (FC_FAILURE);
8412 8413 }
8413 8414 }
8414 8415
8415 8416 ndlp = NULL;
8416 8417
8417 8418 /* Check if this is the first PLOGI */
8418 8419 /* after a PT_TO_PT connection */
8419 8420 if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) {
8420 8421 MAILBOXQ *mbox;
8421 8422
8422 8423 /* ULP bug fix */
8423 8424 if (pkt->pkt_cmd_fhdr.s_id == 0) {
8424 8425 pkt->pkt_cmd_fhdr.s_id =
8425 8426 pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID +
8426 8427 FP_DEFAULT_SID;
8427 8428 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
8428 8429 "PLOGI: P2P Fix. sid=0-->%x did=%x",
8429 8430 pkt->pkt_cmd_fhdr.s_id,
8430 8431 pkt->pkt_cmd_fhdr.d_id);
8431 8432 }
8432 8433
8433 8434 mutex_enter(&EMLXS_PORT_LOCK);
8434 8435 port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id);
8435 8436 mutex_exit(&EMLXS_PORT_LOCK);
8436 8437
8437 8438 /* Update our service parms */
8438 8439 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
8439 8440 MEM_MBOX, 1))) {
8440 8441 emlxs_mb_config_link(hba, mbox);
8441 8442
8442 8443 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
8443 8444 mbox, MBX_NOWAIT, 0);
8444 8445 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
8445 8446 emlxs_mem_put(hba, MEM_MBOX,
8446 8447 (void *)mbox);
8447 8448 }
8448 8449
8449 8450 }
8450 8451 }
8451 8452
8452 8453 /* We will process these cmds at the bottom of this routine */
8453 8454 break;
8454 8455
8455 8456 default:
8456 8457 ndlp = emlxs_node_find_did(port, did);
8457 8458
8458 8459 /* If an ADISC is being sent and we have no node, */
8459 8460 /* then we must fail the ADISC now */
8460 8461 if (!ndlp && (cmd == ELS_CMD_ADISC) && !port->tgt_mode) {
8461 8462
8462 8463 /* Build the LS_RJT response */
8463 8464 els_pkt = (ELS_PKT *)pkt->pkt_resp;
8464 8465 els_pkt->elsCode = 0x01;
8465 8466 els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
8466 8467 els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
8467 8468 LSRJT_LOGICAL_ERR;
8468 8469 els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
8469 8470 LSEXP_NOTHING_MORE;
8470 8471 els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
8471 8472
8472 8473 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8473 8474 "ADISC Rejected. Node not found. did=0x%x", did);
8474 8475
8475 8476 if (sbp->channel == NULL) {
8476 8477 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8477 8478 sbp->channel =
8478 8479 &hba->chan[hba->channel_els];
8479 8480 } else {
8480 8481 sbp->channel =
8481 8482 &hba->chan[FC_ELS_RING];
8482 8483 }
8483 8484 }
8484 8485
8485 8486 /* Return this as rejected by the target */
8486 8487 emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
8487 8488
8488 8489 return (FC_SUCCESS);
8489 8490 }
8490 8491 }
8491 8492
8492 8493 /* DID == BCAST_DID is special case to indicate that */
8493 8494 /* RPI is being passed in seq_id field */
8494 8495 /* This is used by emlxs_send_logo() for target mode */
8495 8496
8496 8497 	/* Initialize iocbq */
8497 8498 iocbq->node = (void *)ndlp;
8498 8499 if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
8499 8500
8500 8501 if (rval == 0xff) {
8501 8502 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8502 8503 rval = FC_SUCCESS;
8503 8504 }
8504 8505
8505 8506 return (rval);
8506 8507 }
8507 8508
8508 8509 cp = &hba->chan[hba->channel_els];
8509 8510 cp->ulpSendCmd++;
8510 8511
8511 8512 /* Check cmd */
8512 8513 switch (cmd) {
8513 8514 case ELS_CMD_PRLI:
8514 8515 /*
8515 8516 * if our firmware version is 3.20 or later,
8516 8517 * set the following bits for FC-TAPE support.
8517 8518 */
8518 8519 if (port->ini_mode &&
8519 8520 (hba->vpd.feaLevelHigh >= 0x02) &&
8520 8521 (cfg[CFG_ADISC_SUPPORT].current != 0)) {
8521 8522 els_pkt->un.prli.ConfmComplAllowed = 1;
8522 8523 els_pkt->un.prli.Retry = 1;
8523 8524 els_pkt->un.prli.TaskRetryIdReq = 1;
8524 8525 } else {
8525 8526 els_pkt->un.prli.ConfmComplAllowed = 0;
8526 8527 els_pkt->un.prli.Retry = 0;
8527 8528 els_pkt->un.prli.TaskRetryIdReq = 0;
8528 8529 }
8529 8530
8530 8531 break;
8531 8532
8532 8533 /* This is a patch for the ULP stack. */
8533 8534
8534 8535 /*
8535 8536 * ULP only reads our service parameters once during bind_port,
8536 8537 * but the service parameters change due to topology.
8537 8538 */
8538 8539 case ELS_CMD_FLOGI:
8539 8540 case ELS_CMD_FDISC:
8540 8541 case ELS_CMD_PLOGI:
8541 8542 case ELS_CMD_PDISC:
8542 8543 /* Copy latest service parameters to payload */
8543 8544 bcopy((void *) &port->sparam,
8544 8545 (void *)&els_pkt->un.logi, sizeof (SERV_PARM));
8545 8546
8546 8547 if ((hba->flag & FC_NPIV_ENABLED) &&
8547 8548 (hba->flag & FC_NPIV_SUPPORTED) &&
8548 8549 (cmd == ELS_CMD_PLOGI)) {
8549 8550 SERV_PARM *sp;
8550 8551 emlxs_vvl_fmt_t *vvl;
8551 8552
8552 8553 sp = (SERV_PARM *)&els_pkt->un.logi;
8553 8554 sp->VALID_VENDOR_VERSION = 1;
8554 8555 vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
8555 8556 vvl->un0.w0.oui = 0x0000C9;
8556 8557 vvl->un0.word0 = LE_SWAP32(vvl->un0.word0);
8557 8558 vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0;
8558 8559 vvl->un1.word1 = LE_SWAP32(vvl->un1.word1);
8559 8560 }
8560 8561
8561 8562 #ifdef DHCHAP_SUPPORT
8562 8563 emlxs_dhc_init_sp(port, did,
8563 8564 (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8564 8565 #endif /* DHCHAP_SUPPORT */
8565 8566
8566 8567 break;
8567 8568 }
8568 8569
8569 8570 /* Initialize the sbp */
8570 8571 mutex_enter(&sbp->mtx);
8571 8572 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8572 8573 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8573 8574 sbp->node = (void *)ndlp;
8574 8575 sbp->lun = EMLXS_LUN_NONE;
8575 8576 sbp->did = did;
8576 8577 mutex_exit(&sbp->mtx);
8577 8578
8578 8579 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
8579 8580 emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
8580 8581
8581 8582 if (pkt->pkt_cmdlen) {
8582 8583 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8583 8584 DDI_DMA_SYNC_FORDEV);
8584 8585 }
8585 8586
8586 8587 /* Check node */
8587 8588 switch (cmd) {
8588 8589 case ELS_CMD_FLOGI:
8589 8590 case ELS_CMD_FDISC:
8590 8591 if (port->ini_mode) {
8591 8592 /* Make sure fabric node is destroyed */
8592 8593 /* It should already have been destroyed at link down */
8593 8594 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
8594 8595 ndlp = emlxs_node_find_did(port, FABRIC_DID);
8595 8596 if (ndlp) {
8596 8597 if (emlxs_mb_unreg_node(port, ndlp,
8597 8598 NULL, NULL, iocbq) == 0) {
8598 8599 /* Deferring iocb tx until */
8599 8600 /* completion of unreg */
8600 8601 return (FC_SUCCESS);
8601 8602 }
8602 8603 }
8603 8604 }
8604 8605 }
8605 8606 break;
8606 8607
8607 8608 case ELS_CMD_PLOGI:
8608 8609
8609 8610 ndlp = emlxs_node_find_did(port, did);
8610 8611
8611 8612 if (ndlp && ndlp->nlp_active) {
8612 8613 /* Close the node for any further normal IO */
8613 8614 emlxs_node_close(port, ndlp, hba->channel_fcp,
8614 8615 pkt->pkt_timeout + 10);
8615 8616 emlxs_node_close(port, ndlp, hba->channel_ip,
8616 8617 pkt->pkt_timeout + 10);
8617 8618
8618 8619 /* Flush tx queues */
8619 8620 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
8620 8621
8621 8622 /* Flush chip queues */
8622 8623 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
8623 8624 }
8624 8625
8625 8626 break;
8626 8627
8627 8628 case ELS_CMD_PRLI:
8628 8629
8629 8630 ndlp = emlxs_node_find_did(port, did);
8630 8631
8631 8632 if (ndlp && ndlp->nlp_active) {
8632 8633 /*
8633 8634 * Close the node for any further FCP IO;
8634 8635 * Flush all outstanding I/O only if
8635 8636 * "Establish Image Pair" bit is set.
8636 8637 */
8637 8638 emlxs_node_close(port, ndlp, hba->channel_fcp,
8638 8639 pkt->pkt_timeout + 10);
8639 8640
8640 8641 if (els_pkt->un.prli.estabImagePair) {
8641 8642 /* Flush tx queues */
8642 8643 (void) emlxs_tx_node_flush(port, ndlp,
8643 8644 &hba->chan[hba->channel_fcp], 0, 0);
8644 8645
8645 8646 /* Flush chip queues */
8646 8647 (void) emlxs_chipq_node_flush(port,
8647 8648 &hba->chan[hba->channel_fcp], ndlp, 0);
8648 8649 }
8649 8650 }
8650 8651
8651 8652 break;
8652 8653
8653 8654 }
8654 8655
8655 8656 HBASTATS.ElsCmdIssued++;
8656 8657
8657 8658 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8658 8659
8659 8660 return (FC_SUCCESS);
8660 8661
8661 8662 } /* emlxs_send_els() */
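
One detail worth noting in the PLOGI handling above is the point-to-point fix-up: when the first PLOGI after a PT_TO_PT connection arrives from the ULP with an s_id of 0, the driver derives a source ID from the destination ID using the FP_DEFAULT_DID/FP_DEFAULT_SID pair. A minimal userland sketch of that arithmetic; the two #define values below are placeholders, since the real constants come from the Leadville headers and are not visible in this file:

    #include <stdio.h>
    #include <stdint.h>

    /* Placeholder values for illustration only */
    #define FP_DEFAULT_DID  0x0000EA
    #define FP_DEFAULT_SID  0x0000E8

    int
    main(void)
    {
            uint32_t d_id = FP_DEFAULT_DID; /* ULP-supplied destination */
            uint32_t s_id = 0;              /* missing source ID */

            if (s_id == 0) {
                    /* Same fix-up as the "ULP bug fix" above */
                    s_id = d_id - FP_DEFAULT_DID + FP_DEFAULT_SID;
            }

            (void) printf("sid=0 --> %x, did=%x\n",
                (unsigned)s_id, (unsigned)d_id); /* sid=0 --> e8, did=ea */
            return (0);
    }
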
8662 8663
8663 8664
8664 8665
8665 8666
8666 8667 static int32_t
8667 8668 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
8668 8669 {
8669 8670 emlxs_hba_t *hba = HBA;
8670 8671 emlxs_config_t *cfg = &CFG;
8671 8672 fc_packet_t *pkt;
8672 8673 IOCBQ *iocbq;
8673 8674 IOCB *iocb;
8674 8675 NODELIST *ndlp;
8675 8676 CHANNEL *cp;
8676 8677 int i;
8677 8678 uint32_t cmd;
8678 8679 uint32_t ucmd;
8679 8680 ELS_PKT *els_pkt;
8680 8681 fc_unsol_buf_t *ubp;
8681 8682 emlxs_ub_priv_t *ub_priv;
8682 8683 uint32_t did;
8683 8684 char fcsp_msg[32];
8684 8685 uint8_t *ub_buffer;
8685 8686 int32_t rval;
8686 8687
8687 8688 fcsp_msg[0] = 0;
8688 8689 pkt = PRIV2PKT(sbp);
8689 8690 els_pkt = (ELS_PKT *)pkt->pkt_cmd;
8690 8691 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8691 8692
8692 8693 iocbq = &sbp->iocbq;
8693 8694 iocb = &iocbq->iocb;
8694 8695
8695 8696 /* Acquire the unsolicited command this pkt is replying to */
8696 8697 if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
8697 8698 /* This is for auto replies when no ub's are used */
8698 8699 ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
8699 8700 ubp = NULL;
8700 8701 ub_priv = NULL;
8701 8702 ub_buffer = NULL;
8702 8703
8703 8704 #ifdef SFCT_SUPPORT
8704 8705 if (sbp->fct_cmd) {
8705 8706 fct_els_t *els =
8706 8707 (fct_els_t *)sbp->fct_cmd->cmd_specific;
8707 8708 ub_buffer = (uint8_t *)els->els_req_payload;
8708 8709 }
8709 8710 #endif /* SFCT_SUPPORT */
8710 8711
8711 8712 } else {
8712 8713 /* Find the ub buffer that goes with this reply */
8713 8714 if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
8714 8715 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
8715 8716 "ELS reply: Invalid oxid=%x",
8716 8717 pkt->pkt_cmd_fhdr.ox_id);
8717 8718 return (FC_BADPACKET);
8718 8719 }
8719 8720
8720 8721 ub_buffer = (uint8_t *)ubp->ub_buffer;
8721 8722 ub_priv = ubp->ub_fca_private;
8722 8723 ucmd = ub_priv->cmd;
8723 8724
8724 8725 ub_priv->flags |= EMLXS_UB_REPLY;
8725 8726
8726 8727 /* Reset oxid to ELS command */
8727 8728 /* We do this because the ub is only valid */
8728 8729 /* until we return from this thread */
8729 8730 pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
8730 8731 }
8731 8732
8732 8733 /* Save the result */
8733 8734 sbp->ucmd = ucmd;
8734 8735
8735 8736 if (sbp->channel == NULL) {
8736 8737 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8737 8738 sbp->channel = &hba->chan[hba->channel_els];
8738 8739 } else {
8739 8740 sbp->channel = &hba->chan[FC_ELS_RING];
8740 8741 }
8741 8742 }
8742 8743
8743 8744 /* Check for interceptions */
8744 8745 switch (ucmd) {
8745 8746
8746 8747 #ifdef ULP_PATCH2
8747 8748 case ELS_CMD_LOGO:
8748 8749 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) {
8749 8750 break;
8750 8751 }
8751 8752
8752 8753 /* Check if this was generated by ULP and not us */
8753 8754 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8754 8755
8755 8756 /*
8756 8757 * Since we replied to this already,
8757 8758 * we won't need to send this now
8758 8759 */
8759 8760 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8760 8761
8761 8762 return (FC_SUCCESS);
8762 8763 }
8763 8764
8764 8765 break;
8765 8766 #endif /* ULP_PATCH2 */
8766 8767
8767 8768 #ifdef ULP_PATCH3
8768 8769 case ELS_CMD_PRLI:
8769 8770 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) {
8770 8771 break;
8771 8772 }
8772 8773
8773 8774 /* Check if this was generated by ULP and not us */
8774 8775 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8775 8776
8776 8777 /*
8777 8778 * Since we replied to this already,
8778 8779 * we won't need to send this now
8779 8780 */
8780 8781 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8781 8782
8782 8783 return (FC_SUCCESS);
8783 8784 }
8784 8785
8785 8786 break;
8786 8787 #endif /* ULP_PATCH3 */
8787 8788
8788 8789
8789 8790 #ifdef ULP_PATCH4
8790 8791 case ELS_CMD_PRLO:
8791 8792 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) {
8792 8793 break;
8793 8794 }
8794 8795
8795 8796 /* Check if this was generated by ULP and not us */
8796 8797 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
8797 8798 /*
8798 8799 * Since we replied to this already,
8799 8800 * we won't need to send this now
8800 8801 */
8801 8802 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8802 8803
8803 8804 return (FC_SUCCESS);
8804 8805 }
8805 8806
8806 8807 break;
8807 8808 #endif /* ULP_PATCH4 */
8808 8809
8809 8810 #ifdef ULP_PATCH6
8810 8811 case ELS_CMD_RSCN:
8811 8812 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) {
8812 8813 break;
8813 8814 }
8814 8815
8815 8816 /* Check if this RSCN was generated by us */
8816 8817 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8817 8818 cmd = *((uint32_t *)pkt->pkt_cmd);
8818 8819 cmd = LE_SWAP32(cmd);
8819 8820 cmd &= ELS_CMD_MASK;
8820 8821
8821 8822 /*
8822 8823 * If ULP is accepting this,
8823 8824 * then close affected node
8824 8825 */
8825 8826 if (port->ini_mode && ub_buffer && cmd
8826 8827 == ELS_CMD_ACC) {
8827 8828 fc_rscn_t *rscn;
8828 8829 uint32_t count;
8829 8830 uint32_t *lp;
8830 8831
8831 8832 /*
8832 8833 * Only the Leadville code path will
8833 8834 * come thru here. The RSCN data is NOT
8834 8835 * swapped properly for the Comstar code
8835 8836 * path.
8836 8837 */
8837 8838 lp = (uint32_t *)ub_buffer;
8838 8839 rscn = (fc_rscn_t *)lp++;
8839 8840 count =
8840 8841 ((rscn->rscn_payload_len - 4) / 4);
8841 8842
8842 8843 /* Close affected ports */
8843 8844 for (i = 0; i < count; i++, lp++) {
8844 8845 (void) emlxs_port_offline(port,
8845 8846 *lp);
8846 8847 }
8847 8848 }
8848 8849
8849 8850 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8850 8851 "RSCN %s: did=%x oxid=%x rxid=%x. "
8851 8852 "Intercepted.", emlxs_elscmd_xlate(cmd),
8852 8853 did, pkt->pkt_cmd_fhdr.ox_id,
8853 8854 pkt->pkt_cmd_fhdr.rx_id);
8854 8855
8855 8856 /*
8856 8857 * Since we generated this RSCN,
8857 8858 * we won't need to send this reply
8858 8859 */
8859 8860 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8860 8861
8861 8862 return (FC_SUCCESS);
8862 8863 }
8863 8864
8864 8865 break;
8865 8866 #endif /* ULP_PATCH6 */
8866 8867
8867 8868 case ELS_CMD_PLOGI:
8868 8869 /* Check if this PLOGI was generated by us */
8869 8870 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
8870 8871 cmd = *((uint32_t *)pkt->pkt_cmd);
8871 8872 cmd = LE_SWAP32(cmd);
8872 8873 cmd &= ELS_CMD_MASK;
8873 8874
8874 8875 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8875 8876 "PLOGI %s: did=%x oxid=%x rxid=%x. "
8876 8877 "Intercepted.", emlxs_elscmd_xlate(cmd),
8877 8878 did, pkt->pkt_cmd_fhdr.ox_id,
8878 8879 pkt->pkt_cmd_fhdr.rx_id);
8879 8880
8880 8881 /*
8881 8882 * Since we generated this PLOGI,
8882 8883 * we won't need to send this reply
8883 8884 */
8884 8885 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
8885 8886
8886 8887 return (FC_SUCCESS);
8887 8888 }
8888 8889
8889 8890 break;
8890 8891 }
8891 8892
8892 8893 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8893 8894 emlxs_swap_els_pkt(sbp);
8894 8895 #endif /* EMLXS_MODREV2X */
8895 8896
8896 8897
8897 8898 cmd = *((uint32_t *)pkt->pkt_cmd);
8898 8899 cmd &= ELS_CMD_MASK;
8899 8900
8900 8901 /* Check if modifications are needed */
8901 8902 switch (ucmd) {
8902 8903 case (ELS_CMD_PRLI):
8903 8904
8904 8905 if (cmd == ELS_CMD_ACC) {
8905 8906 /* This is a patch for the ULP stack. */
8906 8907 /* ULP does not keep track of FCP2 support */
8907 8908 if (port->ini_mode &&
8908 8909 (hba->vpd.feaLevelHigh >= 0x02) &&
8909 8910 (cfg[CFG_ADISC_SUPPORT].current != 0)) {
8910 8911 els_pkt->un.prli.ConfmComplAllowed = 1;
8911 8912 els_pkt->un.prli.Retry = 1;
8912 8913 els_pkt->un.prli.TaskRetryIdReq = 1;
8913 8914 } else {
8914 8915 els_pkt->un.prli.ConfmComplAllowed = 0;
8915 8916 els_pkt->un.prli.Retry = 0;
8916 8917 els_pkt->un.prli.TaskRetryIdReq = 0;
8917 8918 }
8918 8919 }
8919 8920
8920 8921 break;
8921 8922
8922 8923 case ELS_CMD_FLOGI:
8923 8924 case ELS_CMD_PLOGI:
8924 8925 case ELS_CMD_FDISC:
8925 8926 case ELS_CMD_PDISC:
8926 8927
8927 8928 if (cmd == ELS_CMD_ACC) {
8928 8929 /* This is a patch for the ULP stack. */
8929 8930
8930 8931 /*
8931 8932 * ULP only reads our service parameters
8932 8933 * once during bind_port, but the service
8933 8934 * parameters change due to topology.
8934 8935 */
8935 8936
8936 8937 /* Copy latest service parameters to payload */
8937 8938 bcopy((void *)&port->sparam,
8938 8939 (void *)&els_pkt->un.logi, sizeof (SERV_PARM));
8939 8940
8940 8941 #ifdef DHCHAP_SUPPORT
8941 8942 emlxs_dhc_init_sp(port, did,
8942 8943 (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
8943 8944 #endif /* DHCHAP_SUPPORT */
8944 8945
8945 8946 }
8946 8947
8947 8948 break;
8948 8949
8949 8950 }
8950 8951
8951 8952 /* Initialize iocbq */
8952 8953 iocbq->node = (void *)NULL;
8953 8954 if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
8954 8955
8955 8956 if (rval == 0xff) {
8956 8957 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
8957 8958 rval = FC_SUCCESS;
8958 8959 }
8959 8960
8960 8961 return (rval);
8961 8962 }
8962 8963
8963 8964 cp = &hba->chan[hba->channel_els];
8964 8965 cp->ulpSendCmd++;
8965 8966
8966 8967 /* Initialize sbp */
8967 8968 mutex_enter(&sbp->mtx);
8968 8969 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8969 8970 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8970 8971 sbp->node = (void *) NULL;
8971 8972 sbp->lun = EMLXS_LUN_NONE;
8972 8973 sbp->class = iocb->ULPCLASS;
8973 8974 sbp->did = did;
8974 8975 mutex_exit(&sbp->mtx);
8975 8976
8976 8977 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
8977 8978 "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
8978 8979 emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
8979 8980 pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);
8980 8981
8981 8982 /* Process nodes */
8982 8983 switch (ucmd) {
8983 8984 case ELS_CMD_RSCN:
8984 8985 {
8985 8986 if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) {
8986 8987 fc_rscn_t *rscn;
8987 8988 uint32_t count;
8988 8989 uint32_t *lp = NULL;
8989 8990
8990 8991 /*
8991 8992 * Only the Leadville code path will come thru
8992 8993 * here. The RSCN data is NOT swapped properly
8993 8994 * for the Comstar code path.
8994 8995 */
8995 8996 lp = (uint32_t *)ub_buffer;
8996 8997 rscn = (fc_rscn_t *)lp++;
8997 8998 count = ((rscn->rscn_payload_len - 4) / 4);
8998 8999
8999 9000 /* Close affected ports */
9000 9001 for (i = 0; i < count; i++, lp++) {
9001 9002 (void) emlxs_port_offline(port, *lp);
9002 9003 }
9003 9004 }
9004 9005 break;
9005 9006 }
9006 9007 case ELS_CMD_PLOGI:
9007 9008
9008 9009 if (cmd == ELS_CMD_ACC) {
9009 9010 ndlp = emlxs_node_find_did(port, did);
9010 9011
9011 9012 if (ndlp && ndlp->nlp_active) {
9012 9013 /* Close the node for any further normal IO */
9013 9014 emlxs_node_close(port, ndlp, hba->channel_fcp,
9014 9015 pkt->pkt_timeout + 10);
9015 9016 emlxs_node_close(port, ndlp, hba->channel_ip,
9016 9017 pkt->pkt_timeout + 10);
9017 9018
9018 9019 /* Flush tx queue */
9019 9020 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9020 9021
9021 9022 /* Flush chip queue */
9022 9023 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9023 9024 }
9024 9025 }
9025 9026
9026 9027 break;
9027 9028
9028 9029 case ELS_CMD_PRLI:
9029 9030
9030 9031 if (cmd == ELS_CMD_ACC) {
9031 9032 ndlp = emlxs_node_find_did(port, did);
9032 9033
9033 9034 if (ndlp && ndlp->nlp_active) {
9034 9035 /* Close the node for any further normal IO */
9035 9036 emlxs_node_close(port, ndlp, hba->channel_fcp,
9036 9037 pkt->pkt_timeout + 10);
9037 9038
9038 9039 /* Flush tx queues */
9039 9040 (void) emlxs_tx_node_flush(port, ndlp,
9040 9041 &hba->chan[hba->channel_fcp], 0, 0);
9041 9042
9042 9043 /* Flush chip queues */
9043 9044 (void) emlxs_chipq_node_flush(port,
9044 9045 &hba->chan[hba->channel_fcp], ndlp, 0);
9045 9046 }
9046 9047 }
9047 9048
9048 9049 break;
9049 9050
9050 9051 case ELS_CMD_PRLO:
9051 9052
9052 9053 if (cmd == ELS_CMD_ACC) {
9053 9054 ndlp = emlxs_node_find_did(port, did);
9054 9055
9055 9056 if (ndlp && ndlp->nlp_active) {
9056 9057 /* Close the node for any further normal IO */
9057 9058 emlxs_node_close(port, ndlp,
9058 9059 hba->channel_fcp, 60);
9059 9060
9060 9061 /* Flush tx queues */
9061 9062 (void) emlxs_tx_node_flush(port, ndlp,
9062 9063 &hba->chan[hba->channel_fcp], 0, 0);
9063 9064
9064 9065 /* Flush chip queues */
9065 9066 (void) emlxs_chipq_node_flush(port,
9066 9067 &hba->chan[hba->channel_fcp], ndlp, 0);
9067 9068 }
9068 9069 }
9069 9070
9070 9071 break;
9071 9072
9072 9073 case ELS_CMD_LOGO:
9073 9074
9074 9075 if (cmd == ELS_CMD_ACC) {
9075 9076 ndlp = emlxs_node_find_did(port, did);
9076 9077
9077 9078 if (ndlp && ndlp->nlp_active) {
9078 9079 /* Close the node for any further normal IO */
9079 9080 emlxs_node_close(port, ndlp,
9080 9081 hba->channel_fcp, 60);
9081 9082 emlxs_node_close(port, ndlp,
9082 9083 hba->channel_ip, 60);
9083 9084
9084 9085 /* Flush tx queues */
9085 9086 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9086 9087
9087 9088 /* Flush chip queues */
9088 9089 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9089 9090 }
9090 9091 }
9091 9092
9092 9093 break;
9093 9094 }
9094 9095
9095 9096 if (pkt->pkt_cmdlen) {
9096 9097 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9097 9098 DDI_DMA_SYNC_FORDEV);
9098 9099 }
9099 9100
9100 9101 HBASTATS.ElsRspIssued++;
9101 9102
9102 9103 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9103 9104
9104 9105 return (FC_SUCCESS);
9105 9106
9106 9107 } /* emlxs_send_els_rsp() */
9107 9108
9108 9109
9109 9110 #ifdef MENLO_SUPPORT
9110 9111 static int32_t
9111 9112 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp)
9112 9113 {
9113 9114 emlxs_hba_t *hba = HBA;
9114 9115 fc_packet_t *pkt;
9115 9116 IOCBQ *iocbq;
9116 9117 IOCB *iocb;
9117 9118 CHANNEL *cp;
9118 9119 NODELIST *ndlp;
9119 9120 uint32_t did;
9120 9121 uint32_t *lp;
9121 9122 int32_t rval;
9122 9123
9123 9124 pkt = PRIV2PKT(sbp);
9124 9125 did = EMLXS_MENLO_DID;
9125 9126 lp = (uint32_t *)pkt->pkt_cmd;
9126 9127
9127 9128 iocbq = &sbp->iocbq;
9128 9129 iocb = &iocbq->iocb;
9129 9130
9130 9131 ndlp = emlxs_node_find_did(port, did);
9131 9132
9132 9133 if (!ndlp || !ndlp->nlp_active) {
9133 9134 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9134 9135 "Node not found. did=0x%x", did);
9135 9136
9136 9137 return (FC_BADPACKET);
9137 9138 }
9138 9139
9139 9140 iocbq->node = (void *) ndlp;
9140 9141 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9141 9142
9142 9143 if (rval == 0xff) {
9143 9144 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9144 9145 rval = FC_SUCCESS;
9145 9146 }
9146 9147
9147 9148 return (rval);
9148 9149 }
9149 9150
9150 9151 cp = &hba->chan[hba->channel_ct];
9151 9152 cp->ulpSendCmd++;
9152 9153
9153 9154 if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
9154 9155 /* Cmd phase */
9155 9156
9156 9157 /* Initialize iocb */
9157 9158 iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
9158 9159 iocb->ULPCONTEXT = 0;
9159 9160 iocb->ULPPU = 3;
9160 9161
9161 9162 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9162 9163 "%s: [%08x,%08x,%08x,%08x]",
9163 9164 emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]),
9164 9165 BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4]));
9165 9166
9166 9167 } else { /* FC_PKT_OUTBOUND */
9167 9168
9168 9169 /* MENLO_CMD_FW_DOWNLOAD Data Phase */
9169 9170 iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
9170 9171
9171 9172 /* Initialize iocb */
9172 9173 iocb->un.genreq64.param = 0;
9173 9174 iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
9174 9175 iocb->ULPPU = 1;
9175 9176
9176 9177 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9177 9178 "%s: Data: rxid=0x%x size=%d",
9178 9179 emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
9179 9180 pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
9180 9181 }
9181 9182
9182 9183 /* Initialize sbp */
9183 9184 mutex_enter(&sbp->mtx);
9184 9185 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9185 9186 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9186 9187 sbp->node = (void *) ndlp;
9187 9188 sbp->lun = EMLXS_LUN_NONE;
9188 9189 sbp->class = iocb->ULPCLASS;
9189 9190 sbp->did = did;
9190 9191 mutex_exit(&sbp->mtx);
9191 9192
9192 9193 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9193 9194 DDI_DMA_SYNC_FORDEV);
9194 9195
9195 9196 HBASTATS.CtCmdIssued++;
9196 9197
9197 9198 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9198 9199
9199 9200 return (FC_SUCCESS);
9200 9201
9201 9202 } /* emlxs_send_menlo() */
9202 9203 #endif /* MENLO_SUPPORT */
9203 9204
9204 9205
9205 9206 static int32_t
9206 9207 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
9207 9208 {
9208 9209 emlxs_hba_t *hba = HBA;
9209 9210 fc_packet_t *pkt;
9210 9211 IOCBQ *iocbq;
9211 9212 IOCB *iocb;
9212 9213 NODELIST *ndlp;
9213 9214 uint32_t did;
9214 9215 CHANNEL *cp;
9215 9216 int32_t rval;
9216 9217
9217 9218 pkt = PRIV2PKT(sbp);
9218 9219 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9219 9220
9220 9221 iocbq = &sbp->iocbq;
9221 9222 iocb = &iocbq->iocb;
9222 9223
9223 9224 ndlp = emlxs_node_find_did(port, did);
9224 9225
9225 9226 if (!ndlp || !ndlp->nlp_active) {
9226 9227 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9227 9228 "Node not found. did=0x%x", did);
9228 9229
9229 9230 return (FC_BADPACKET);
9230 9231 }
9231 9232
9232 9233 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9233 9234 emlxs_swap_ct_pkt(sbp);
9234 9235 #endif /* EMLXS_MODREV2X */
9235 9236
9236 9237 iocbq->node = (void *)ndlp;
9237 9238 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9238 9239
9239 9240 if (rval == 0xff) {
9240 9241 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9241 9242 rval = FC_SUCCESS;
9242 9243 }
9243 9244
9244 9245 return (rval);
9245 9246 }
9246 9247
9247 9248 cp = &hba->chan[hba->channel_ct];
9248 9249 cp->ulpSendCmd++;
9249 9250
9250 9251 /* Initialize sbp */
9251 9252 mutex_enter(&sbp->mtx);
9252 9253 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9253 9254 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9254 9255 sbp->node = (void *)ndlp;
9255 9256 sbp->lun = EMLXS_LUN_NONE;
9256 9257 sbp->class = iocb->ULPCLASS;
9257 9258 sbp->did = did;
9258 9259 mutex_exit(&sbp->mtx);
9259 9260
9260 9261 if (did == NAMESERVER_DID) {
9261 9262 SLI_CT_REQUEST *CtCmd;
9262 9263 uint32_t *lp0;
9263 9264
9264 9265 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9265 9266 lp0 = (uint32_t *)pkt->pkt_cmd;
9266 9267
9267 9268 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9268 9269 "%s: did=%x [%08x,%08x]",
9269 9270 emlxs_ctcmd_xlate(
9270 9271 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9271 9272 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9272 9273
9273 9274 if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
9274 9275 sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
9275 9276 }
9276 9277
9277 9278 } else if (did == FDMI_DID) {
9278 9279 SLI_CT_REQUEST *CtCmd;
9279 9280 uint32_t *lp0;
9280 9281
9281 9282 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9282 9283 lp0 = (uint32_t *)pkt->pkt_cmd;
9283 9284
9284 9285 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9285 9286 "%s: did=%x [%08x,%08x]",
9286 9287 emlxs_mscmd_xlate(
9287 9288 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9288 9289 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9289 9290 } else {
9290 9291 SLI_CT_REQUEST *CtCmd;
9291 9292 uint32_t *lp0;
9292 9293
9293 9294 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9294 9295 lp0 = (uint32_t *)pkt->pkt_cmd;
9295 9296
9296 9297 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9297 9298 "%s: did=%x [%08x,%08x]",
9298 9299 emlxs_rmcmd_xlate(
9299 9300 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
9300 9301 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
9301 9302 }
9302 9303
9303 9304 if (pkt->pkt_cmdlen) {
9304 9305 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9305 9306 DDI_DMA_SYNC_FORDEV);
9306 9307 }
9307 9308
9308 9309 HBASTATS.CtCmdIssued++;
9309 9310
9310 9311 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9311 9312
9312 9313 return (FC_SUCCESS);
9313 9314
9314 9315 } /* emlxs_send_ct() */
9315 9316
9316 9317
9317 9318 static int32_t
9318 9319 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
9319 9320 {
9320 9321 emlxs_hba_t *hba = HBA;
9321 9322 fc_packet_t *pkt;
9322 9323 CHANNEL *cp;
9323 9324 IOCBQ *iocbq;
9324 9325 IOCB *iocb;
9325 9326 uint32_t *cmd;
9326 9327 SLI_CT_REQUEST *CtCmd;
9327 9328 int32_t rval;
9328 9329
9329 9330 pkt = PRIV2PKT(sbp);
9330 9331 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
9331 9332 cmd = (uint32_t *)pkt->pkt_cmd;
9332 9333
9333 9334 iocbq = &sbp->iocbq;
9334 9335 iocb = &iocbq->iocb;
9335 9336
9336 9337 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9337 9338 emlxs_swap_ct_pkt(sbp);
9338 9339 #endif /* EMLXS_MODREV2X */
9339 9340
9340 9341 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9341 9342
9342 9343 if (rval == 0xff) {
9343 9344 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9344 9345 rval = FC_SUCCESS;
9345 9346 }
9346 9347
9347 9348 return (rval);
9348 9349 }
9349 9350
9350 9351 cp = &hba->chan[hba->channel_ct];
9351 9352 cp->ulpSendCmd++;
9352 9353
9353 9354 /* Initialize sbp */
9354 9355 mutex_enter(&sbp->mtx);
9355 9356 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9356 9357 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9357 9358 sbp->node = NULL;
9358 9359 sbp->lun = EMLXS_LUN_NONE;
9359 9360 sbp->class = iocb->ULPCLASS;
9360 9361 mutex_exit(&sbp->mtx);
9361 9362
9362 9363 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
9363 9364 "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
9364 9365 emlxs_rmcmd_xlate(LE_SWAP16(
9365 9366 CtCmd->CommandResponse.bits.CmdRsp)),
9366 9367 CtCmd->ReasonCode, CtCmd->Explanation,
9367 9368 LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]),
9368 9369 pkt->pkt_cmd_fhdr.rx_id);
9369 9370
9370 9371 if (pkt->pkt_cmdlen) {
9371 9372 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9372 9373 DDI_DMA_SYNC_FORDEV);
9373 9374 }
9374 9375
9375 9376 HBASTATS.CtRspIssued++;
9376 9377
9377 9378 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9378 9379
9379 9380 return (FC_SUCCESS);
9380 9381
9381 9382 } /* emlxs_send_ct_rsp() */
9382 9383
9383 9384
9384 9385 /*
9385 9386 * emlxs_get_instance()
9386 9387 * Given a DDI instance number (ddiinst), return the Fibre Channel (emlx) instance.
9387 9388 */
9388 9389 extern uint32_t
9389 9390 emlxs_get_instance(int32_t ddiinst)
9390 9391 {
9391 9392 uint32_t i;
9392 9393 uint32_t inst;
9393 9394
9394 9395 mutex_enter(&emlxs_device.lock);
9395 9396
9396 9397 inst = MAX_FC_BRDS;
9397 9398 for (i = 0; i < emlxs_instance_count; i++) {
9398 9399 if (emlxs_instance[i] == ddiinst) {
9399 9400 inst = i;
9400 9401 break;
9401 9402 }
9402 9403 }
9403 9404
9404 9405 mutex_exit(&emlxs_device.lock);
9405 9406
9406 9407 return (inst);
9407 9408
9408 9409 } /* emlxs_get_instance() */
9409 9410
9410 9411
9411 9412 /*
9412 9413 * emlxs_add_instance()
9413 9414 * Given a DDI instance number (ddiinst), create a Fibre Channel (emlx) instance.
9414 9415 * emlx instances are numbered in the order that emlxs_attach() is called, starting at 0.
9415 9416 */
9416 9417 static uint32_t
9417 9418 emlxs_add_instance(int32_t ddiinst)
9418 9419 {
9419 9420 uint32_t i;
9420 9421
9421 9422 mutex_enter(&emlxs_device.lock);
9422 9423
9423 9424 /* First see if the ddiinst already exists */
9424 9425 for (i = 0; i < emlxs_instance_count; i++) {
9425 9426 if (emlxs_instance[i] == ddiinst) {
9426 9427 break;
9427 9428 }
9428 9429 }
9429 9430
9430 9431 /* If it doesn't already exist, add it */
9431 9432 if (i >= emlxs_instance_count) {
9432 9433 if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
9433 9434 emlxs_instance[i] = ddiinst;
9434 9435 emlxs_instance_count++;
9435 9436 emlxs_device.hba_count = emlxs_instance_count;
9436 9437 }
9437 9438 }
9438 9439
9439 9440 mutex_exit(&emlxs_device.lock);
9440 9441
9441 9442 return (i);
9442 9443
9443 9444 } /* emlxs_add_instance() */
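The two routines above keep a simple array that maps DDI instance numbers to driver-local (emlx) indices in attach order. A minimal standalone sketch of the same lookup/append idea, with hypothetical names and the driver's locking omitted, might look like this:

    /* Illustrative sketch only: a linear ddi-to-local instance map. */
    #include <stdio.h>

    #define MAX_BRDS	8

    static int map[MAX_BRDS];	/* map[local index] = ddi instance */
    static int map_count;

    /* Return the local index for a ddi instance, or MAX_BRDS if unknown. */
    static int
    sketch_get_instance(int ddiinst)
    {
    	int i;

    	for (i = 0; i < map_count; i++) {
    		if (map[i] == ddiinst)
    			return (i);
    	}
    	return (MAX_BRDS);
    }

    /* Add a ddi instance in attach order; return its local index. */
    static int
    sketch_add_instance(int ddiinst)
    {
    	int i = sketch_get_instance(ddiinst);

    	if (i >= map_count && map_count < MAX_BRDS) {
    		i = map_count;
    		map[i] = ddiinst;
    		map_count++;
    	}
    	return (i);
    }

    int
    main(void)
    {
    	int a = sketch_add_instance(7);
    	int b = sketch_add_instance(3);

    	(void) printf("%d %d %d\n", a, b, sketch_get_instance(7));	/* 0 1 0 */
    	return (0);
    }

The real routines additionally do this under emlxs_device.lock, clamp against MAX_FC_BRDS, and mirror the count into emlxs_device.hba_count.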
9444 9445
9445 9446
9446 9447 /*ARGSUSED*/
9447 9448 extern void
9448 9449 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9449 9450 uint32_t doneq)
9450 9451 {
9451 9452 emlxs_hba_t *hba;
9452 9453 emlxs_port_t *port;
9453 9454 emlxs_buf_t *fpkt;
9454 9455
9455 9456 port = sbp->port;
9456 9457
9457 9458 if (!port) {
9458 9459 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
9459 9460 "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);
9460 9461
9461 9462 return;
9462 9463 }
9463 9464
9464 9465 hba = HBA;
9465 9466
9466 9467 if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
9467 9468 (sbp->iotag)) {
9468 9469 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
9469 9470 "WARNING: Completing IO with iotag. sbp=%p iotag=%x "
9470 9471 "xri_flags=%x",
9471 9472 sbp, sbp->iotag, ((sbp->xrip)? sbp->xrip->flag:0));
9472 9473
9473 9474 emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1);
9474 9475 }
9475 9476
9476 9477 mutex_enter(&sbp->mtx);
9477 9478
9478 9479 /* Check for error conditions */
9479 9480 if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED |
9480 9481 PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
9481 9482 PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
9482 9483 if (sbp->pkt_flags & PACKET_ULP_OWNED) {
9483 9484 EMLXS_MSGF(EMLXS_CONTEXT,
9484 9485 &emlxs_pkt_completion_error_msg,
9485 9486 "Packet already returned. sbp=%p flags=%x", sbp,
9486 9487 sbp->pkt_flags);
9487 9488 }
9488 9489
9489 9490 else if (sbp->pkt_flags & PACKET_COMPLETED) {
9490 9491 EMLXS_MSGF(EMLXS_CONTEXT,
9491 9492 &emlxs_pkt_completion_error_msg,
9492 9493 "Packet already completed. sbp=%p flags=%x", sbp,
9493 9494 sbp->pkt_flags);
9494 9495 }
9495 9496
9496 9497 else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
9497 9498 EMLXS_MSGF(EMLXS_CONTEXT,
9498 9499 &emlxs_pkt_completion_error_msg,
9499 9500 "Pkt already on done queue. sbp=%p flags=%x", sbp,
9500 9501 sbp->pkt_flags);
9501 9502 }
9502 9503
9503 9504 else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
9504 9505 EMLXS_MSGF(EMLXS_CONTEXT,
9505 9506 &emlxs_pkt_completion_error_msg,
9506 9507 "Packet already in completion. sbp=%p flags=%x",
9507 9508 sbp, sbp->pkt_flags);
9508 9509 }
9509 9510
9510 9511 else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
9511 9512 EMLXS_MSGF(EMLXS_CONTEXT,
9512 9513 &emlxs_pkt_completion_error_msg,
9513 9514 "Packet still on chip queue. sbp=%p flags=%x",
9514 9515 sbp, sbp->pkt_flags);
9515 9516 }
9516 9517
9517 9518 else if (sbp->pkt_flags & PACKET_IN_TXQ) {
9518 9519 EMLXS_MSGF(EMLXS_CONTEXT,
9519 9520 &emlxs_pkt_completion_error_msg,
9520 9521 "Packet still on tx queue. sbp=%p flags=%x", sbp,
9521 9522 sbp->pkt_flags);
9522 9523 }
9523 9524
9524 9525 mutex_exit(&sbp->mtx);
9525 9526 return;
9526 9527 }
9527 9528
9528 9529 /* Packet is now in completion */
9529 9530 sbp->pkt_flags |= PACKET_IN_COMPLETION;
9530 9531
9531 9532 /* Set the state if not already set */
9532 9533 if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9533 9534 emlxs_set_pkt_state(sbp, iostat, localstat, 0);
9534 9535 }
9535 9536
9536 9537 /* Check for parent flush packet */
9537 9538 /* If pkt has a parent flush packet then adjust its count now */
9538 9539 fpkt = sbp->fpkt;
9539 9540 if (fpkt) {
9540 9541 /*
9541 9542 * We will try to NULL sbp->fpkt inside the
9542 9543 * fpkt's mutex if possible
9543 9544 */
9544 9545
9545 9546 if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) {
9546 9547 mutex_enter(&fpkt->mtx);
9547 9548 if (fpkt->flush_count) {
9548 9549 fpkt->flush_count--;
9549 9550 }
9550 9551 sbp->fpkt = NULL;
9551 9552 mutex_exit(&fpkt->mtx);
9552 9553 } else { /* fpkt has been returned already */
9553 9554
9554 9555 sbp->fpkt = NULL;
9555 9556 }
9556 9557 }
9557 9558
9558 9559 /* If pkt is polled, then wake up sleeping thread */
9559 9560 if (sbp->pkt_flags & PACKET_POLLED) {
9560 9561 /* Don't set the PACKET_ULP_OWNED flag here */
9561 9562 /* because the polling thread will do it */
9562 9563 sbp->pkt_flags |= PACKET_COMPLETED;
9563 9564 mutex_exit(&sbp->mtx);
9564 9565
9565 9566 /* Wake up sleeping thread */
9566 9567 mutex_enter(&EMLXS_PKT_LOCK);
9567 9568 cv_broadcast(&EMLXS_PKT_CV);
9568 9569 mutex_exit(&EMLXS_PKT_LOCK);
9569 9570 }
9570 9571
9571 9572 /* If packet was generated by our driver, */
9572 9573 /* then complete it immediately */
9573 9574 else if (sbp->pkt_flags & PACKET_ALLOCATED) {
9574 9575 mutex_exit(&sbp->mtx);
9575 9576
9576 9577 emlxs_iodone(sbp);
9577 9578 }
9578 9579
9579 9580 /* Put the pkt on the done queue for callback */
9580 9581 /* completion in another thread */
9581 9582 else {
9582 9583 sbp->pkt_flags |= PACKET_IN_DONEQ;
9583 9584 sbp->next = NULL;
9584 9585 mutex_exit(&sbp->mtx);
9585 9586
9586 9587 /* Put pkt on doneq, so I/O's will be completed in order */
9587 9588 mutex_enter(&EMLXS_PORT_LOCK);
9588 9589 if (hba->iodone_tail == NULL) {
9589 9590 hba->iodone_list = sbp;
9590 9591 hba->iodone_count = 1;
9591 9592 } else {
9592 9593 hba->iodone_tail->next = sbp;
9593 9594 hba->iodone_count++;
9594 9595 }
9595 9596 hba->iodone_tail = sbp;
9596 9597 mutex_exit(&EMLXS_PORT_LOCK);
9597 9598
9598 9599 /* Trigger a thread to service the doneq */
9599 9600 emlxs_thread_trigger1(&hba->iodone_thread,
9600 9601 emlxs_iodone_server);
9601 9602 }
9602 9603
9603 9604 return;
9604 9605
9605 9606 } /* emlxs_pkt_complete() */
9606 9607
9607 9608
9608 9609 #ifdef SAN_DIAG_SUPPORT
9609 9610 /*
9610 9611 * This routine is called with EMLXS_PORT_LOCK held so we can just increment
9611 9612 * normally. Don't have to use atomic operations.
9612 9613 */
9613 9614 extern void
9614 9615 emlxs_update_sd_bucket(emlxs_buf_t *sbp)
9615 9616 {
9616 9617 emlxs_port_t *vport;
9617 9618 fc_packet_t *pkt;
9618 9619 uint32_t did;
9619 9620 hrtime_t t;
9620 9621 hrtime_t delta_time;
9621 9622 int i;
9622 9623 NODELIST *ndlp;
9623 9624
9624 9625 vport = sbp->port;
9625 9626
9626 9627 if ((sd_bucket.search_type == 0) ||
9627 9628 (vport->sd_io_latency_state != SD_COLLECTING))
9628 9629 return;
9629 9630
9630 9631 /* Compute the iolatency time in microseconds */
9631 9632 t = gethrtime();
9632 9633 delta_time = t - sbp->sd_start_time;
9633 9634 pkt = PRIV2PKT(sbp);
9634 9635 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9635 9636 ndlp = emlxs_node_find_did(vport, did);
9636 9637
9637 9638 if (ndlp) {
9638 9639 if (delta_time >=
9639 9640 sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1])
9640 9641 ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1].
9641 9642 count++;
9642 9643 else if (delta_time <= sd_bucket.values[0])
9643 9644 ndlp->sd_dev_bucket[0].count++;
9644 9645 else {
9645 9646 for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) {
9646 9647 if ((delta_time > sd_bucket.values[i-1]) &&
9647 9648 (delta_time <= sd_bucket.values[i])) {
9648 9649 ndlp->sd_dev_bucket[i].count++;
9649 9650 break;
9650 9651 }
9651 9652 }
9652 9653 }
9653 9654 }
9654 9655 }
9655 9656 #endif /* SAN_DIAG_SUPPORT */
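The bucket update above is essentially a linear classification of an hrtime delta against the sorted sd_bucket.values boundaries, with the first and last buckets catching underflow and overflow. A rough standalone sketch of that classification, using hypothetical boundary values, could be:

    /* Illustrative sketch only: classify a latency sample against sorted
     * bucket boundaries, mirroring the linear search in the routine above. */
    #include <stdio.h>

    #define NBUCKETS	4

    /* Hypothetical boundaries; the driver's sd_bucket.values come from its
     * SAN diag configuration. */
    static const long long bounds[NBUCKETS] = { 100, 1000, 10000, 100000 };

    static int
    sketch_bucket(long long delta)
    {
    	int i;

    	if (delta >= bounds[NBUCKETS - 1])
    		return (NBUCKETS - 1);
    	if (delta <= bounds[0])
    		return (0);
    	for (i = 1; i < NBUCKETS; i++) {
    		if (delta > bounds[i - 1] && delta <= bounds[i])
    			return (i);
    	}
    	return (NBUCKETS - 1);	/* not reached */
    }

    int
    main(void)
    {
    	(void) printf("%d %d %d\n", sketch_bucket(50),
    	    sketch_bucket(5000), sketch_bucket(999999));	/* 0 2 3 */
    	return (0);
    }

The driver increments the matching ndlp->sd_dev_bucket[i].count instead of returning an index, and only while the port is in the SD_COLLECTING state.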
9656 9657
9657 9658 /*ARGSUSED*/
9658 9659 static void
9659 9660 emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
9660 9661 {
9661 9662 emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
9662 9663 emlxs_buf_t *sbp;
9663 9664
9664 9665 mutex_enter(&EMLXS_PORT_LOCK);
9665 9666
9666 9667 /* Remove one pkt from the doneq head and complete it */
9667 9668 while ((sbp = hba->iodone_list) != NULL) {
9668 9669 if ((hba->iodone_list = sbp->next) == NULL) {
9669 9670 hba->iodone_tail = NULL;
9670 9671 hba->iodone_count = 0;
9671 9672 } else {
9672 9673 hba->iodone_count--;
9673 9674 }
9674 9675
9675 9676 mutex_exit(&EMLXS_PORT_LOCK);
9676 9677
9677 9678 /* Prepare the pkt for completion */
9678 9679 mutex_enter(&sbp->mtx);
9679 9680 sbp->next = NULL;
9680 9681 sbp->pkt_flags &= ~PACKET_IN_DONEQ;
9681 9682 mutex_exit(&sbp->mtx);
9682 9683
9683 9684 /* Complete the IO now */
9684 9685 emlxs_iodone(sbp);
9685 9686
9686 9687 /* Reacquire lock and check if more work is to be done */
9687 9688 mutex_enter(&EMLXS_PORT_LOCK);
9688 9689 }
9689 9690
9690 9691 mutex_exit(&EMLXS_PORT_LOCK);
9691 9692
9692 9693 #ifdef FMA_SUPPORT
9693 9694 if (hba->flag & FC_DMA_CHECK_ERROR) {
9694 9695 emlxs_thread_spawn(hba, emlxs_restart_thread,
9695 9696 NULL, NULL);
9696 9697 }
9697 9698 #endif /* FMA_SUPPORT */
9698 9699
9699 9700 return;
9700 9701
9701 9702 } /* End emlxs_iodone_server */
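emlxs_pkt_complete() appends finished packets to a head/tail singly-linked done queue under EMLXS_PORT_LOCK, and emlxs_iodone_server() drains it from the head, so callbacks run in completion order. A minimal sketch of that FIFO pattern, with locking and the worker-thread trigger left out, might be:

    /* Illustrative sketch only: the head/tail singly-linked FIFO pattern
     * used for the done queue. */
    #include <stdio.h>

    typedef struct node {
    	struct node	*next;
    	int		id;
    } node_t;

    static node_t	*doneq_head;
    static node_t	*doneq_tail;
    static int	doneq_count;

    static void
    doneq_put(node_t *np)
    {
    	np->next = NULL;
    	if (doneq_tail == NULL) {
    		doneq_head = np;
    		doneq_count = 1;
    	} else {
    		doneq_tail->next = np;
    		doneq_count++;
    	}
    	doneq_tail = np;
    }

    static node_t *
    doneq_get(void)
    {
    	node_t *np = doneq_head;

    	if (np != NULL) {
    		if ((doneq_head = np->next) == NULL) {
    			doneq_tail = NULL;
    			doneq_count = 0;
    		} else {
    			doneq_count--;
    		}
    		np->next = NULL;
    	}
    	return (np);
    }

    int
    main(void)
    {
    	node_t a = { NULL, 1 }, b = { NULL, 2 };
    	node_t *np;

    	doneq_put(&a);
    	doneq_put(&b);
    	while ((np = doneq_get()) != NULL)
    		(void) printf("%d\n", np->id);	/* prints 1 then 2 */
    	return (0);
    }

Keeping both head and tail pointers makes the enqueue O(1) while preserving completion order for the consumer thread.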
9702 9703
9703 9704
9704 9705 static void
9705 9706 emlxs_iodone(emlxs_buf_t *sbp)
9706 9707 {
9707 9708 #ifdef FMA_SUPPORT
9708 9709 emlxs_port_t *port = sbp->port;
9709 9710 emlxs_hba_t *hba = port->hba;
9710 9711 #endif /* FMA_SUPPORT */
9711 9712
9712 9713 fc_packet_t *pkt;
9713 9714 CHANNEL *cp;
9714 9715
9715 9716 pkt = PRIV2PKT(sbp);
9716 9717
9717 9718 /* Check one more time that the pkt has not already been returned */
9718 9719 if (sbp->pkt_flags & PACKET_ULP_OWNED) {
9719 9720 return;
9720 9721 }
9721 9722
9722 9723 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9723 9724 emlxs_unswap_pkt(sbp);
9724 9725 #endif /* EMLXS_MODREV2X */
9725 9726
9726 9727 mutex_enter(&sbp->mtx);
9727 9728 sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED);
9728 9729 mutex_exit(&sbp->mtx);
9729 9730
9730 9731 if (pkt->pkt_comp) {
9731 9732 #ifdef FMA_SUPPORT
9732 9733 emlxs_check_dma(hba, sbp);
9733 9734 #endif /* FMA_SUPPORT */
9734 9735
9735 9736 if (sbp->channel) {
9736 9737 cp = (CHANNEL *)sbp->channel;
9737 9738 cp->ulpCmplCmd++;
9738 9739 }
9739 9740
9740 9741 (*pkt->pkt_comp) (pkt);
9741 9742 }
9742 9743
9743 9744 return;
9744 9745
9745 9746 } /* emlxs_iodone() */
9746 9747
9747 9748
9748 9749
9749 9750 extern fc_unsol_buf_t *
9750 9751 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
9751 9752 {
9752 9753 emlxs_unsol_buf_t *pool;
9753 9754 fc_unsol_buf_t *ubp;
9754 9755 emlxs_ub_priv_t *ub_priv;
9755 9756
9756 9757 /* Check if this is a valid ub token */
9757 9758 if (token < EMLXS_UB_TOKEN_OFFSET) {
9758 9759 return (NULL);
9759 9760 }
9760 9761
9761 9762 mutex_enter(&EMLXS_UB_LOCK);
9762 9763
9763 9764 pool = port->ub_pool;
9764 9765 while (pool) {
9765 9766 /* Find a pool with the proper token range */
9766 9767 if (token >= pool->pool_first_token &&
9767 9768 token <= pool->pool_last_token) {
9768 9769 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token -
9769 9770 pool->pool_first_token)];
9770 9771 ub_priv = ubp->ub_fca_private;
9771 9772
9772 9773 if (ub_priv->token != token) {
9773 9774 EMLXS_MSGF(EMLXS_CONTEXT,
9774 9775 &emlxs_sfs_debug_msg,
9775 9776 "ub_find: Invalid token=%x", ubp, token,
9776 9777 ub_priv->token);
9777 9778
9778 9779 ubp = NULL;
9779 9780 }
9780 9781
9781 9782 else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
9782 9783 EMLXS_MSGF(EMLXS_CONTEXT,
9783 9784 &emlxs_sfs_debug_msg,
9784 9785 "ub_find: Buffer not in use. buffer=%p "
9785 9786 "token=%x", ubp, token);
9786 9787
9787 9788 ubp = NULL;
9788 9789 }
9789 9790
9790 9791 mutex_exit(&EMLXS_UB_LOCK);
9791 9792
9792 9793 return (ubp);
9793 9794 }
9794 9795
9795 9796 pool = pool->pool_next;
9796 9797 }
9797 9798
9798 9799 mutex_exit(&EMLXS_UB_LOCK);
9799 9800
9800 9801 return (NULL);
9801 9802
9802 9803 } /* emlxs_ub_find() */
9803 9804
9804 9805
9805 9806
9806 9807 extern fc_unsol_buf_t *
9807 9808 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type,
9808 9809 uint32_t reserve)
9809 9810 {
9810 9811 emlxs_hba_t *hba = HBA;
9811 9812 emlxs_unsol_buf_t *pool;
9812 9813 fc_unsol_buf_t *ubp;
9813 9814 emlxs_ub_priv_t *ub_priv;
9814 9815 uint32_t i;
9815 9816 uint32_t resv_flag;
9816 9817 uint32_t pool_free;
9817 9818 uint32_t pool_free_resv;
9818 9819
9819 9820 mutex_enter(&EMLXS_UB_LOCK);
9820 9821
9821 9822 pool = port->ub_pool;
9822 9823 while (pool) {
9823 9824 /* Find a pool of the appropriate type and size */
9824 9825 if ((pool->pool_available == 0) ||
9825 9826 (pool->pool_type != type) ||
9826 9827 (pool->pool_buf_size < size)) {
9827 9828 goto next_pool;
9828 9829 }
9829 9830
9830 9831
9831 9832 /* Adjust free counts based on availability */
9832 9833 /* The free reserve count gets first priority */
9833 9834 pool_free_resv =
9834 9835 min(pool->pool_free_resv, pool->pool_available);
9835 9836 pool_free =
9836 9837 min(pool->pool_free,
9837 9838 (pool->pool_available - pool_free_resv));
9838 9839
9839 9840 /* Initialize reserve flag */
9840 9841 resv_flag = reserve;
9841 9842
9842 9843 if (resv_flag) {
9843 9844 if (pool_free_resv == 0) {
9844 9845 if (pool_free == 0) {
9845 9846 goto next_pool;
9846 9847 }
9847 9848 resv_flag = 0;
9848 9849 }
9849 9850 } else if (pool_free == 0) {
9850 9851 goto next_pool;
9851 9852 }
9852 9853
9853 9854 /* Find next available free buffer in this pool */
9854 9855 for (i = 0; i < pool->pool_nentries; i++) {
9855 9856 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
9856 9857 ub_priv = ubp->ub_fca_private;
9857 9858
9858 9859 if (!ub_priv->available ||
9859 9860 ub_priv->flags != EMLXS_UB_FREE) {
9860 9861 continue;
9861 9862 }
9862 9863
9863 9864 ub_priv->time = hba->timer_tics;
9864 9865
9865 9866 /* Timeout in 5 minutes */
9866 9867 ub_priv->timeout = (5 * 60);
9867 9868
9868 9869 ub_priv->flags = EMLXS_UB_IN_USE;
9869 9870
9870 9871 /* Alloc the buffer from the pool */
9871 9872 if (resv_flag) {
9872 9873 ub_priv->flags |= EMLXS_UB_RESV;
9873 9874 pool->pool_free_resv--;
9874 9875 } else {
9875 9876 pool->pool_free--;
9876 9877 }
9877 9878
9878 9879 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
9879 9880 "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
9880 9881 ub_priv->token, pool->pool_nentries,
9881 9882 pool->pool_available, pool->pool_free,
9882 9883 pool->pool_free_resv);
9883 9884
9884 9885 mutex_exit(&EMLXS_UB_LOCK);
9885 9886
9886 9887 return (ubp);
9887 9888 }
9888 9889 next_pool:
9889 9890
9890 9891 pool = pool->pool_next;
9891 9892 }
9892 9893
9893 9894 mutex_exit(&EMLXS_UB_LOCK);
9894 9895
9895 9896 return (NULL);
9896 9897
9897 9898 } /* emlxs_ub_get() */
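The pool selection above first clamps the reserved free count against what is actually available and only then computes the ordinary free count from the remainder, so reserved requests get first claim on available buffers. A small sketch of that accounting, with hypothetical parameter names, might be:

    /* Illustrative sketch only: derive the effective free counts, clamping
     * the reserve count first against what is still available. */
    #include <stdio.h>

    #define MIN(a, b)	((a) < (b) ? (a) : (b))

    static void
    sketch_adjust(int avail, int free, int free_resv,
        int *eff_free, int *eff_free_resv)
    {
    	*eff_free_resv = MIN(free_resv, avail);
    	*eff_free = MIN(free, avail - *eff_free_resv);
    }

    int
    main(void)
    {
    	int f, fr;

    	sketch_adjust(5, 10, 3, &f, &fr);
    	(void) printf("free=%d resv=%d\n", f, fr);	/* free=2 resv=3 */
    	return (0);
    }

In the driver, the resulting counts then decide whether a reserve request falls back to the ordinary free pool or whether the search moves on to the next pool.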
9898 9899
9899 9900
9900 9901
9901 9902 extern void
9902 9903 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
9903 9904 uint32_t lock)
9904 9905 {
9905 9906 fc_packet_t *pkt;
9906 9907 fcp_rsp_t *fcp_rsp;
9907 9908 uint32_t i;
9908 9909 emlxs_xlat_err_t *tptr;
9909 9910 emlxs_xlat_err_t *entry;
9910 9911
9911 9912
9912 9913 pkt = PRIV2PKT(sbp);
9913 9914
9914 9915 if (lock) {
9915 9916 mutex_enter(&sbp->mtx);
9916 9917 }
9917 9918
9918 9919 if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
9919 9920 sbp->pkt_flags |= PACKET_STATE_VALID;
9920 9921
9921 9922 /* Perform table lookup */
9922 9923 entry = NULL;
9923 9924 if (iostat != IOSTAT_LOCAL_REJECT) {
9924 9925 tptr = emlxs_iostat_tbl;
9925 9926 for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
9926 9927 if (iostat == tptr->emlxs_status) {
9927 9928 entry = tptr;
9928 9929 break;
9929 9930 }
9930 9931 }
9931 9932 } else { /* iostat == IOSTAT_LOCAL_REJECT */
9932 9933
9933 9934 tptr = emlxs_ioerr_tbl;
9934 9935 for (i = 0; i < IOERR_MAX; i++, tptr++) {
9935 9936 if (localstat == tptr->emlxs_status) {
9936 9937 entry = tptr;
9937 9938 break;
9938 9939 }
9939 9940 }
9940 9941 }
9941 9942
9942 9943 if (entry) {
9943 9944 pkt->pkt_state = entry->pkt_state;
9944 9945 pkt->pkt_reason = entry->pkt_reason;
9945 9946 pkt->pkt_expln = entry->pkt_expln;
9946 9947 pkt->pkt_action = entry->pkt_action;
9947 9948 } else {
9948 9949 /* Set defaults */
9949 9950 pkt->pkt_state = FC_PKT_TRAN_ERROR;
9950 9951 pkt->pkt_reason = FC_REASON_ABORTED;
9951 9952 pkt->pkt_expln = FC_EXPLN_NONE;
9952 9953 pkt->pkt_action = FC_ACTION_RETRYABLE;
9953 9954 }
9954 9955
9955 9956
9956 9957 /* Set the residual counts and response frame */
9957 9958 /* Check if response frame was received from the chip */
9958 9959 /* If so, then the residual counts will already be set */
9959 9960 if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
9960 9961 PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
9961 9962 /* We have to create the response frame */
9962 9963 if (iostat == IOSTAT_SUCCESS) {
9963 9964 pkt->pkt_resp_resid = 0;
9964 9965 pkt->pkt_data_resid = 0;
9965 9966
9966 9967 if ((pkt->pkt_cmd_fhdr.type ==
9967 9968 FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
9968 9969 pkt->pkt_resp) {
9969 9970 fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;
9970 9971
9971 9972 fcp_rsp->fcp_u.fcp_status.
9972 9973 rsp_len_set = 1;
9973 9974 fcp_rsp->fcp_response_len = 8;
9974 9975 }
9975 9976 } else {
9976 9977 /* Otherwise assume no data */
9977 9978 /* and no response received */
9978 9979 pkt->pkt_data_resid = pkt->pkt_datalen;
9979 9980 pkt->pkt_resp_resid = pkt->pkt_rsplen;
9980 9981 }
9981 9982 }
9982 9983 }
9983 9984
9984 9985 if (lock) {
9985 9986 mutex_exit(&sbp->mtx);
9986 9987 }
9987 9988
9988 9989 return;
9989 9990
9990 9991 } /* emlxs_set_pkt_state() */
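emlxs_set_pkt_state() translates the completion status through emlxs_iostat_tbl (or emlxs_ioerr_tbl for local rejects) and falls back to retryable transport-error defaults when no entry matches. A generic sketch of that table-lookup-with-defaults pattern, using made-up codes rather than the driver's FC_* constants, could be:

    /* Illustrative sketch only: translate a status code through a small
     * table, falling back to defaults when no entry matches. */
    #include <stdio.h>

    typedef struct xlat {
    	int	status;
    	int	pkt_state;
    	int	pkt_reason;
    } xlat_t;

    static const xlat_t tbl[] = {
    	{ 0, 10, 0 },	/* hypothetical: success */
    	{ 1, 20, 5 },	/* hypothetical: remote stop */
    };

    static void
    sketch_set_state(int status, int *state, int *reason)
    {
    	const xlat_t *e = NULL;
    	unsigned int i;

    	for (i = 0; i < sizeof (tbl) / sizeof (tbl[0]); i++) {
    		if (tbl[i].status == status) {
    			e = &tbl[i];
    			break;
    		}
    	}
    	if (e != NULL) {
    		*state = e->pkt_state;
    		*reason = e->pkt_reason;
    	} else {
    		*state = 99;	/* default: transport error */
    		*reason = 98;	/* default: aborted */
    	}
    }

    int
    main(void)
    {
    	int s, r;

    	sketch_set_state(1, &s, &r);
    	(void) printf("%d %d\n", s, r);		/* 20 5 */
    	sketch_set_state(7, &s, &r);
    	(void) printf("%d %d\n", s, r);		/* 99 98 */
    	return (0);
    }

The driver's table entries additionally carry pkt_expln and pkt_action fields, which are copied the same way.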
9991 9992
9992 9993
9993 9994 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9994 9995
9995 9996 extern void
9996 9997 emlxs_swap_service_params(SERV_PARM *sp)
9997 9998 {
9998 9999 uint16_t *p;
9999 10000 int size;
10000 10001 int i;
10001 10002
10002 10003 size = (sizeof (CSP) - 4) / 2;
10003 10004 p = (uint16_t *)&sp->cmn;
10004 10005 for (i = 0; i < size; i++) {
10005 10006 p[i] = LE_SWAP16(p[i]);
10006 10007 }
10007 10008 sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov);
10008 10009
10009 10010 size = sizeof (CLASS_PARMS) / 2;
10010 10011 p = (uint16_t *)&sp->cls1;
10011 10012 for (i = 0; i < size; i++, p++) {
10012 10013 *p = LE_SWAP16(*p);
10013 10014 }
10014 10015
10015 10016 size = sizeof (CLASS_PARMS) / 2;
10016 10017 p = (uint16_t *)&sp->cls2;
10017 10018 for (i = 0; i < size; i++, p++) {
10018 10019 *p = LE_SWAP16(*p);
10019 10020 }
10020 10021
10021 10022 size = sizeof (CLASS_PARMS) / 2;
10022 10023 p = (uint16_t *)&sp->cls3;
10023 10024 for (i = 0; i < size; i++, p++) {
10024 10025 *p = LE_SWAP16(*p);
10025 10026 }
10026 10027
10027 10028 size = sizeof (CLASS_PARMS) / 2;
10028 10029 p = (uint16_t *)&sp->cls4;
10029 10030 for (i = 0; i < size; i++, p++) {
10030 10031 *p = LE_SWAP16(*p);
10031 10032 }
10032 10033
10033 10034 return;
10034 10035
10035 10036 } /* emlxs_swap_service_params() */
10036 10037
10037 10038 extern void
10038 10039 emlxs_unswap_pkt(emlxs_buf_t *sbp)
10039 10040 {
10040 10041 if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
10041 10042 emlxs_swap_fcp_pkt(sbp);
10042 10043 }
10043 10044
10044 10045 else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
10045 10046 emlxs_swap_els_pkt(sbp);
10046 10047 }
10047 10048
10048 10049 else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
10049 10050 emlxs_swap_ct_pkt(sbp);
10050 10051 }
10051 10052
10052 10053 } /* emlxs_unswap_pkt() */
10053 10054
10054 10055
10055 10056 extern void
10056 10057 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
10057 10058 {
10058 10059 fc_packet_t *pkt;
10059 10060 FCP_CMND *cmd;
10060 10061 fcp_rsp_t *rsp;
10061 10062 uint16_t *lunp;
10062 10063 uint32_t i;
10063 10064
10064 10065 mutex_enter(&sbp->mtx);
10065 10066
10066 10067 if (sbp->pkt_flags & PACKET_ALLOCATED) {
10067 10068 mutex_exit(&sbp->mtx);
10068 10069 return;
10069 10070 }
10070 10071
10071 10072 if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
10072 10073 sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
10073 10074 } else {
10074 10075 sbp->pkt_flags |= PACKET_FCP_SWAPPED;
10075 10076 }
10076 10077
10077 10078 mutex_exit(&sbp->mtx);
10078 10079
10079 10080 pkt = PRIV2PKT(sbp);
10080 10081
10081 10082 cmd = (FCP_CMND *)pkt->pkt_cmd;
10082 10083 rsp = (pkt->pkt_rsplen &&
10083 10084 (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
10084 10085 (fcp_rsp_t *)pkt->pkt_resp : NULL;
10085 10086
10086 10087 /* The size of data buffer needs to be swapped. */
10087 10088 cmd->fcpDl = LE_SWAP32(cmd->fcpDl);
10088 10089
10089 10090 /*
10090 10091 * Swap first 2 words of FCP CMND payload.
10091 10092 */
10092 10093 lunp = (uint16_t *)&cmd->fcpLunMsl;
10093 10094 for (i = 0; i < 4; i++) {
10094 10095 lunp[i] = LE_SWAP16(lunp[i]);
10095 10096 }
10096 10097
10097 10098 if (rsp) {
10098 10099 rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid);
10099 10100 rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len);
10100 10101 rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len);
10101 10102 }
10102 10103
10103 10104 return;
10104 10105
10105 10106 } /* emlxs_swap_fcp_pkt() */
10106 10107
10107 10108
10108 10109 extern void
10109 10110 emlxs_swap_els_pkt(emlxs_buf_t *sbp)
10110 10111 {
10111 10112 fc_packet_t *pkt;
10112 10113 uint32_t *cmd;
10113 10114 uint32_t *rsp;
10114 10115 uint32_t command;
10115 10116 uint16_t *c;
10116 10117 uint32_t i;
10117 10118 uint32_t swapped;
10118 10119
10119 10120 mutex_enter(&sbp->mtx);
10120 10121
10121 10122 if (sbp->pkt_flags & PACKET_ALLOCATED) {
10122 10123 mutex_exit(&sbp->mtx);
10123 10124 return;
10124 10125 }
10125 10126
10126 10127 if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
10127 10128 sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
10128 10129 swapped = 1;
10129 10130 } else {
10130 10131 sbp->pkt_flags |= PACKET_ELS_SWAPPED;
10131 10132 swapped = 0;
10132 10133 }
10133 10134
10134 10135 mutex_exit(&sbp->mtx);
10135 10136
10136 10137 pkt = PRIV2PKT(sbp);
10137 10138
10138 10139 cmd = (uint32_t *)pkt->pkt_cmd;
10139 10140 rsp = (pkt->pkt_rsplen &&
10140 10141 (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
10141 10142 (uint32_t *)pkt->pkt_resp : NULL;
10142 10143
10143 10144 if (!swapped) {
10144 10145 cmd[0] = LE_SWAP32(cmd[0]);
10145 10146 command = cmd[0] & ELS_CMD_MASK;
10146 10147 } else {
10147 10148 command = cmd[0] & ELS_CMD_MASK;
10148 10149 cmd[0] = LE_SWAP32(cmd[0]);
10149 10150 }
10150 10151
10151 10152 if (rsp) {
10152 10153 rsp[0] = LE_SWAP32(rsp[0]);
10153 10154 }
10154 10155
10155 10156 switch (command) {
10156 10157 case ELS_CMD_ACC:
10157 10158 if (sbp->ucmd == ELS_CMD_ADISC) {
10158 10159 /* Hard address of originator */
10159 10160 cmd[1] = LE_SWAP32(cmd[1]);
10160 10161
10161 10162 /* N_Port ID of originator */
10162 10163 cmd[6] = LE_SWAP32(cmd[6]);
10163 10164 }
10164 10165 break;
10165 10166
10166 10167 case ELS_CMD_PLOGI:
10167 10168 case ELS_CMD_FLOGI:
10168 10169 case ELS_CMD_FDISC:
10169 10170 if (rsp) {
10170 10171 emlxs_swap_service_params((SERV_PARM *) & rsp[1]);
10171 10172 }
10172 10173 break;
10173 10174
10174 10175 case ELS_CMD_LOGO:
10175 10176 cmd[1] = LE_SWAP32(cmd[1]); /* N_Port ID */
10176 10177 break;
10177 10178
10178 10179 case ELS_CMD_RLS:
10179 10180 cmd[1] = LE_SWAP32(cmd[1]);
10180 10181
10181 10182 if (rsp) {
10182 10183 for (i = 0; i < 6; i++) {
10183 10184 rsp[1 + i] = LE_SWAP32(rsp[1 + i]);
10184 10185 }
10185 10186 }
10186 10187 break;
10187 10188
10188 10189 case ELS_CMD_ADISC:
10189 10190 cmd[1] = LE_SWAP32(cmd[1]); /* Hard address of originator */
10190 10191 cmd[6] = LE_SWAP32(cmd[6]); /* N_Port ID of originator */
10191 10192 break;
10192 10193
10193 10194 case ELS_CMD_PRLI:
10194 10195 c = (uint16_t *)&cmd[1];
10195 10196 c[1] = LE_SWAP16(c[1]);
10196 10197
10197 10198 cmd[4] = LE_SWAP32(cmd[4]);
10198 10199
10199 10200 if (rsp) {
10200 10201 rsp[4] = LE_SWAP32(rsp[4]);
10201 10202 }
10202 10203 break;
10203 10204
10204 10205 case ELS_CMD_SCR:
10205 10206 cmd[1] = LE_SWAP32(cmd[1]);
10206 10207 break;
10207 10208
10208 10209 case ELS_CMD_LINIT:
10209 10210 if (rsp) {
10210 10211 rsp[1] = LE_SWAP32(rsp[1]);
10211 10212 }
10212 10213 break;
10213 10214
10214 10215 default:
10215 10216 break;
10216 10217 }
10217 10218
10218 10219 return;
10219 10220
10220 10221 } /* emlxs_swap_els_pkt() */
10221 10222
10222 10223
10223 10224 extern void
10224 10225 emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
10225 10226 {
10226 10227 fc_packet_t *pkt;
10227 10228 uint32_t *cmd;
10228 10229 uint32_t *rsp;
10229 10230 uint32_t command;
10230 10231 uint32_t i;
10231 10232 uint32_t swapped;
10232 10233
10233 10234 mutex_enter(&sbp->mtx);
10234 10235
10235 10236 if (sbp->pkt_flags & PACKET_ALLOCATED) {
10236 10237 mutex_exit(&sbp->mtx);
10237 10238 return;
10238 10239 }
10239 10240
10240 10241 if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
10241 10242 sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
10242 10243 swapped = 1;
10243 10244 } else {
10244 10245 sbp->pkt_flags |= PACKET_CT_SWAPPED;
10245 10246 swapped = 0;
10246 10247 }
10247 10248
10248 10249 mutex_exit(&sbp->mtx);
10249 10250
10250 10251 pkt = PRIV2PKT(sbp);
10251 10252
10252 10253 cmd = (uint32_t *)pkt->pkt_cmd;
10253 10254 rsp = (pkt->pkt_rsplen &&
10254 10255 (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
10255 10256 (uint32_t *)pkt->pkt_resp : NULL;
10256 10257
10257 10258 if (!swapped) {
10258 10259 cmd[0] = 0x01000000;
10259 10260 command = cmd[2];
10260 10261 }
10261 10262
10262 10263 cmd[0] = LE_SWAP32(cmd[0]);
10263 10264 cmd[1] = LE_SWAP32(cmd[1]);
10264 10265 cmd[2] = LE_SWAP32(cmd[2]);
10265 10266 cmd[3] = LE_SWAP32(cmd[3]);
10266 10267
10267 10268 if (swapped) {
10268 10269 command = cmd[2];
10269 10270 }
10270 10271
10271 10272 switch ((command >> 16)) {
10272 10273 case SLI_CTNS_GA_NXT:
10273 10274 cmd[4] = LE_SWAP32(cmd[4]);
10274 10275 break;
10275 10276
10276 10277 case SLI_CTNS_GPN_ID:
10277 10278 case SLI_CTNS_GNN_ID:
10278 10279 case SLI_CTNS_RPN_ID:
10279 10280 case SLI_CTNS_RNN_ID:
10280 10281 case SLI_CTNS_RSPN_ID:
10281 10282 cmd[4] = LE_SWAP32(cmd[4]);
10282 10283 break;
10283 10284
10284 10285 case SLI_CTNS_RCS_ID:
10285 10286 case SLI_CTNS_RPT_ID:
10286 10287 cmd[4] = LE_SWAP32(cmd[4]);
10287 10288 cmd[5] = LE_SWAP32(cmd[5]);
10288 10289 break;
10289 10290
10290 10291 case SLI_CTNS_RFT_ID:
10291 10292 cmd[4] = LE_SWAP32(cmd[4]);
10292 10293
10293 10294 /* Swap FC4 types */
10294 10295 for (i = 0; i < 8; i++) {
10295 10296 cmd[5 + i] = LE_SWAP32(cmd[5 + i]);
10296 10297 }
10297 10298 break;
10298 10299
10299 10300 case SLI_CTNS_GFT_ID:
10300 10301 if (rsp) {
10301 10302 /* Swap FC4 types */
10302 10303 for (i = 0; i < 8; i++) {
10303 10304 rsp[4 + i] = LE_SWAP32(rsp[4 + i]);
10304 10305 }
10305 10306 }
10306 10307 break;
10307 10308
10308 10309 case SLI_CTNS_GCS_ID:
10309 10310 case SLI_CTNS_GSPN_ID:
10310 10311 case SLI_CTNS_GSNN_NN:
10311 10312 case SLI_CTNS_GIP_NN:
10312 10313 case SLI_CTNS_GIPA_NN:
10313 10314
10314 10315 case SLI_CTNS_GPT_ID:
10315 10316 case SLI_CTNS_GID_NN:
10316 10317 case SLI_CTNS_GNN_IP:
10317 10318 case SLI_CTNS_GIPA_IP:
10318 10319 case SLI_CTNS_GID_FT:
10319 10320 case SLI_CTNS_GID_PT:
10320 10321 case SLI_CTNS_GID_PN:
10321 10322 case SLI_CTNS_RIP_NN:
10322 10323 case SLI_CTNS_RIPA_NN:
10323 10324 case SLI_CTNS_RSNN_NN:
10324 10325 case SLI_CTNS_DA_ID:
10325 10326 case SLI_CT_RESPONSE_FS_RJT:
10326 10327 case SLI_CT_RESPONSE_FS_ACC:
10327 10328
10328 10329 default:
10329 10330 break;
10330 10331 }
10331 10332 return;
10332 10333
10333 10334 } /* emlxs_swap_ct_pkt() */
10334 10335
10335 10336
10336 10337 extern void
10337 10338 emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
10338 10339 {
10339 10340 emlxs_ub_priv_t *ub_priv;
10340 10341 fc_rscn_t *rscn;
10341 10342 uint32_t count;
10342 10343 uint32_t i;
10343 10344 uint32_t *lp;
10344 10345 la_els_logi_t *logi;
10345 10346
10346 10347 ub_priv = ubp->ub_fca_private;
10347 10348
10348 10349 switch (ub_priv->cmd) {
10349 10350 case ELS_CMD_RSCN:
10350 10351 rscn = (fc_rscn_t *)ubp->ub_buffer;
10351 10352
10352 10353 rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len);
10353 10354
10354 10355 count = ((rscn->rscn_payload_len - 4) / 4);
10355 10356 lp = (uint32_t *)ubp->ub_buffer + 1;
10356 10357 for (i = 0; i < count; i++, lp++) {
10357 10358 *lp = LE_SWAP32(*lp);
10358 10359 }
10359 10360
10360 10361 break;
10361 10362
10362 10363 case ELS_CMD_FLOGI:
10363 10364 case ELS_CMD_PLOGI:
10364 10365 case ELS_CMD_FDISC:
10365 10366 case ELS_CMD_PDISC:
10366 10367 logi = (la_els_logi_t *)ubp->ub_buffer;
10367 10368 emlxs_swap_service_params(
10368 10369 (SERV_PARM *)&logi->common_service);
10369 10370 break;
10370 10371
10371 10372 /* ULP handles this */
10372 10373 case ELS_CMD_LOGO:
10373 10374 case ELS_CMD_PRLI:
10374 10375 case ELS_CMD_PRLO:
10375 10376 case ELS_CMD_ADISC:
10376 10377 default:
10377 10378 break;
10378 10379 }
10379 10380
10380 10381 return;
10381 10382
10382 10383 } /* emlxs_swap_els_ub() */
10383 10384
10384 10385
10385 10386 #endif /* EMLXS_MODREV2X */
10386 10387
10387 10388
10388 10389 extern char *
10389 10390 emlxs_elscmd_xlate(uint32_t elscmd)
10390 10391 {
10391 10392 static char buffer[32];
10392 10393 uint32_t i;
10393 10394 uint32_t count;
10394 10395
10395 10396 count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
10396 10397 for (i = 0; i < count; i++) {
10397 10398 if (elscmd == emlxs_elscmd_table[i].code) {
10398 10399 return (emlxs_elscmd_table[i].string);
10399 10400 }
10400 10401 }
10401 10402
10402 10403 (void) sprintf(buffer, "ELS=0x%x", elscmd);
10403 10404 return (buffer);
10404 10405
10405 10406 } /* emlxs_elscmd_xlate() */
10406 10407
10407 10408
10408 10409 extern char *
10409 10410 emlxs_ctcmd_xlate(uint32_t ctcmd)
10410 10411 {
10411 10412 static char buffer[32];
10412 10413 uint32_t i;
10413 10414 uint32_t count;
10414 10415
10415 10416 count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
10416 10417 for (i = 0; i < count; i++) {
10417 10418 if (ctcmd == emlxs_ctcmd_table[i].code) {
10418 10419 return (emlxs_ctcmd_table[i].string);
10419 10420 }
10420 10421 }
10421 10422
10422 10423 (void) sprintf(buffer, "cmd=0x%x", ctcmd);
10423 10424 return (buffer);
10424 10425
10425 10426 } /* emlxs_ctcmd_xlate() */
10426 10427
10427 10428
10428 10429 #ifdef MENLO_SUPPORT
10429 10430 extern char *
10430 10431 emlxs_menlo_cmd_xlate(uint32_t cmd)
10431 10432 {
10432 10433 static char buffer[32];
10433 10434 uint32_t i;
10434 10435 uint32_t count;
10435 10436
10436 10437 count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
10437 10438 for (i = 0; i < count; i++) {
10438 10439 if (cmd == emlxs_menlo_cmd_table[i].code) {
10439 10440 return (emlxs_menlo_cmd_table[i].string);
10440 10441 }
10441 10442 }
10442 10443
10443 10444 (void) sprintf(buffer, "Cmd=0x%x", cmd);
10444 10445 return (buffer);
10445 10446
10446 10447 } /* emlxs_menlo_cmd_xlate() */
10447 10448
10448 10449 extern char *
10449 10450 emlxs_menlo_rsp_xlate(uint32_t rsp)
10450 10451 {
10451 10452 static char buffer[32];
10452 10453 uint32_t i;
10453 10454 uint32_t count;
10454 10455
10455 10456 count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
10456 10457 for (i = 0; i < count; i++) {
10457 10458 if (rsp == emlxs_menlo_rsp_table[i].code) {
10458 10459 return (emlxs_menlo_rsp_table[i].string);
10459 10460 }
10460 10461 }
10461 10462
10462 10463 (void) sprintf(buffer, "Rsp=0x%x", rsp);
10463 10464 return (buffer);
10464 10465
10465 10466 } /* emlxs_menlo_rsp_xlate() */
10466 10467
10467 10468 #endif /* MENLO_SUPPORT */
10468 10469
10469 10470
10470 10471 extern char *
10471 10472 emlxs_rmcmd_xlate(uint32_t rmcmd)
10472 10473 {
10473 10474 static char buffer[32];
10474 10475 uint32_t i;
10475 10476 uint32_t count;
10476 10477
10477 10478 count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
10478 10479 for (i = 0; i < count; i++) {
10479 10480 if (rmcmd == emlxs_rmcmd_table[i].code) {
10480 10481 return (emlxs_rmcmd_table[i].string);
10481 10482 }
10482 10483 }
10483 10484
10484 10485 (void) sprintf(buffer, "RM=0x%x", rmcmd);
10485 10486 return (buffer);
10486 10487
10487 10488 } /* emlxs_rmcmd_xlate() */
10488 10489
10489 10490
10490 10491
10491 10492 extern char *
10492 10493 emlxs_mscmd_xlate(uint16_t mscmd)
10493 10494 {
10494 10495 static char buffer[32];
10495 10496 uint32_t i;
10496 10497 uint32_t count;
10497 10498
10498 10499 count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
10499 10500 for (i = 0; i < count; i++) {
10500 10501 if (mscmd == emlxs_mscmd_table[i].code) {
10501 10502 return (emlxs_mscmd_table[i].string);
10502 10503 }
10503 10504 }
10504 10505
10505 10506 (void) sprintf(buffer, "Cmd=0x%x", mscmd);
10506 10507 return (buffer);
10507 10508
10508 10509 } /* emlxs_mscmd_xlate() */
10509 10510
10510 10511
10511 10512 extern char *
10512 10513 emlxs_state_xlate(uint8_t state)
10513 10514 {
10514 10515 static char buffer[32];
10515 10516 uint32_t i;
10516 10517 uint32_t count;
10517 10518
10518 10519 count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
10519 10520 for (i = 0; i < count; i++) {
10520 10521 if (state == emlxs_state_table[i].code) {
10521 10522 return (emlxs_state_table[i].string);
10522 10523 }
10523 10524 }
10524 10525
10525 10526 (void) sprintf(buffer, "State=0x%x", state);
10526 10527 return (buffer);
10527 10528
10528 10529 } /* emlxs_state_xlate() */
10529 10530
10530 10531
10531 10532 extern char *
10532 10533 emlxs_error_xlate(uint8_t errno)
10533 10534 {
10534 10535 static char buffer[32];
10535 10536 uint32_t i;
10536 10537 uint32_t count;
10537 10538
10538 10539 count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
10539 10540 for (i = 0; i < count; i++) {
10540 10541 if (errno == emlxs_error_table[i].code) {
10541 10542 return (emlxs_error_table[i].string);
10542 10543 }
10543 10544 }
10544 10545
10545 10546 (void) sprintf(buffer, "Errno=0x%x", errno);
10546 10547 return (buffer);
10547 10548
10548 10549 } /* emlxs_error_xlate() */
10549 10550
10550 10551
10551 10552 static int
10552 10553 emlxs_pm_lower_power(dev_info_t *dip)
10553 10554 {
10554 10555 int ddiinst;
10555 10556 int emlxinst;
10556 10557 emlxs_config_t *cfg;
10557 10558 int32_t rval;
10558 10559 emlxs_hba_t *hba;
10559 10560
10560 10561 ddiinst = ddi_get_instance(dip);
10561 10562 emlxinst = emlxs_get_instance(ddiinst);
10562 10563 hba = emlxs_device.hba[emlxinst];
10563 10564 cfg = &CFG;
10564 10565
10565 10566 rval = DDI_SUCCESS;
10566 10567
10567 10568 /* Lower the power level */
10568 10569 if (cfg[CFG_PM_SUPPORT].current) {
10569 10570 rval =
10570 10571 pm_lower_power(dip, EMLXS_PM_ADAPTER,
10571 10572 EMLXS_PM_ADAPTER_DOWN);
10572 10573 } else {
10573 10574 /* Kernel power management support is not enabled, */
10574 10575 /* so call our power management routine directly */
10575 10576 rval =
10576 10577 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
10577 10578 }
10578 10579
10579 10580 return (rval);
10580 10581
10581 10582 } /* emlxs_pm_lower_power() */
10582 10583
10583 10584
10584 10585 static int
10585 10586 emlxs_pm_raise_power(dev_info_t *dip)
10586 10587 {
10587 10588 int ddiinst;
10588 10589 int emlxinst;
10589 10590 emlxs_config_t *cfg;
10590 10591 int32_t rval;
10591 10592 emlxs_hba_t *hba;
10592 10593
10593 10594 ddiinst = ddi_get_instance(dip);
10594 10595 emlxinst = emlxs_get_instance(ddiinst);
10595 10596 hba = emlxs_device.hba[emlxinst];
10596 10597 cfg = &CFG;
10597 10598
10598 10599 /* Raise the power level */
10599 10600 if (cfg[CFG_PM_SUPPORT].current) {
10600 10601 rval =
10601 10602 pm_raise_power(dip, EMLXS_PM_ADAPTER,
10602 10603 EMLXS_PM_ADAPTER_UP);
10603 10604 } else {
10604 10605 /* Kernel power management support is not enabled, */
10605 10606 /* so call our power management routine directly */
10606 10607 rval =
10607 10608 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
10608 10609 }
10609 10610
10610 10611 return (rval);
10611 10612
10612 10613 } /* emlxs_pm_raise_power() */
10613 10614
10614 10615
10615 10616 #ifdef IDLE_TIMER
10616 10617
10617 10618 extern int
10618 10619 emlxs_pm_busy_component(emlxs_hba_t *hba)
10619 10620 {
10620 10621 emlxs_config_t *cfg = &CFG;
10621 10622 int rval;
10622 10623
10623 10624 hba->pm_active = 1;
10624 10625
10625 10626 if (hba->pm_busy) {
10626 10627 return (DDI_SUCCESS);
10627 10628 }
10628 10629
10629 10630 mutex_enter(&EMLXS_PM_LOCK);
10630 10631
10631 10632 if (hba->pm_busy) {
10632 10633 mutex_exit(&EMLXS_PM_LOCK);
10633 10634 return (DDI_SUCCESS);
10634 10635 }
10635 10636 hba->pm_busy = 1;
10636 10637
10637 10638 mutex_exit(&EMLXS_PM_LOCK);
10638 10639
10639 10640 /* Attempt to notify system that we are busy */
10640 10641 if (cfg[CFG_PM_SUPPORT].current) {
10641 10642 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10642 10643 "pm_busy_component.");
10643 10644
10644 10645 rval = pm_busy_component(dip, EMLXS_PM_ADAPTER);
10645 10646
10646 10647 if (rval != DDI_SUCCESS) {
10647 10648 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10648 10649 "pm_busy_component failed. ret=%d", rval);
10649 10650
10650 10651 /* If this attempt failed then clear our flags */
10651 10652 mutex_enter(&EMLXS_PM_LOCK);
10652 10653 hba->pm_busy = 0;
10653 10654 mutex_exit(&EMLXS_PM_LOCK);
10654 10655
10655 10656 return (rval);
10656 10657 }
10657 10658 }
10658 10659
10659 10660 return (DDI_SUCCESS);
10660 10661
10661 10662 } /* emlxs_pm_busy_component() */
10662 10663
10663 10664
10664 10665 extern int
10665 10666 emlxs_pm_idle_component(emlxs_hba_t *hba)
10666 10667 {
10667 10668 emlxs_config_t *cfg = &CFG;
10668 10669 int rval;
10669 10670
10670 10671 if (!hba->pm_busy) {
10671 10672 return (DDI_SUCCESS);
10672 10673 }
10673 10674
10674 10675 mutex_enter(&EMLXS_PM_LOCK);
10675 10676
10676 10677 if (!hba->pm_busy) {
10677 10678 mutex_exit(&EMLXS_PM_LOCK);
10678 10679 return (DDI_SUCCESS);
10679 10680 }
10680 10681 hba->pm_busy = 0;
10681 10682
10682 10683 mutex_exit(&EMLXS_PM_LOCK);
10683 10684
10684 10685 if (cfg[CFG_PM_SUPPORT].current) {
10685 10686 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10686 10687 "pm_idle_component.");
10687 10688
10688 10689 rval = pm_idle_component(dip, EMLXS_PM_ADAPTER);
10689 10690
10690 10691 if (rval != DDI_SUCCESS) {
10691 10692 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
10692 10693 "pm_idle_component failed. ret=%d", rval);
10693 10694
10694 10695 /* If this attempt failed then */
10695 10696 /* reset our flags for another attempt */
10696 10697 mutex_enter(&EMLXS_PM_LOCK);
10697 10698 hba->pm_busy = 1;
10698 10699 mutex_exit(&EMLXS_PM_LOCK);
10699 10700
10700 10701 return (rval);
10701 10702 }
10702 10703 }
10703 10704
10704 10705 return (DDI_SUCCESS);
10705 10706
10706 10707 } /* emlxs_pm_idle_component() */
10707 10708
10708 10709
10709 10710 extern void
10710 10711 emlxs_pm_idle_timer(emlxs_hba_t *hba)
10711 10712 {
10712 10713 emlxs_config_t *cfg = &CFG;
10713 10714
10714 10715 if (hba->pm_active) {
10715 10716 /* Clear active flag and reset idle timer */
10716 10717 mutex_enter(&EMLXS_PM_LOCK);
10717 10718 hba->pm_active = 0;
10718 10719 hba->pm_idle_timer =
10719 10720 hba->timer_tics + cfg[CFG_PM_IDLE].current;
10720 10721 mutex_exit(&EMLXS_PM_LOCK);
10721 10722 }
10722 10723
10723 10724 /* Check for idle timeout */
10724 10725 else if (hba->timer_tics >= hba->pm_idle_timer) {
10725 10726 if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
10726 10727 mutex_enter(&EMLXS_PM_LOCK);
10727 10728 hba->pm_idle_timer =
10728 10729 hba->timer_tics + cfg[CFG_PM_IDLE].current;
10729 10730 mutex_exit(&EMLXS_PM_LOCK);
10730 10731 }
10731 10732 }
10732 10733
10733 10734 return;
10734 10735
10735 10736 } /* emlxs_pm_idle_timer() */
10736 10737
10737 10738 #endif /* IDLE_TIMER */
10738 10739
10739 10740
10740 10741 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
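/*
 * Read the per-instance "<DRIVER_NAME><inst>-vport" property, falling back
 * to the global "vport" property.  Each string entry has the form
 * "pwwpn:wwnn:wwpn:vpi": three colon-separated, hex-encoded world-wide names
 * (the physical port WWPN, then the vport WWNN and WWPN) followed by a
 * decimal virtual port index.  Entries whose physical-port WWPN does not
 * match this HBA, or whose vpi is out of range or already configured,
 * are skipped.
 */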
10741 10742 static void
10742 10743 emlxs_read_vport_prop(emlxs_hba_t *hba)
10743 10744 {
10744 10745 emlxs_port_t *port = &PPORT;
10745 10746 emlxs_config_t *cfg = &CFG;
10746 10747 char **arrayp;
10747 10748 uint8_t *s;
10748 10749 uint8_t *np;
10749 10750 NAME_TYPE pwwpn;
10750 10751 NAME_TYPE wwnn;
10751 10752 NAME_TYPE wwpn;
10752 10753 uint32_t vpi;
10753 10754 uint32_t cnt;
10754 10755 uint32_t rval;
10755 10756 uint32_t i;
10756 10757 uint32_t j;
10757 10758 uint32_t c1;
10758 10759 uint32_t sum;
10759 10760 uint32_t errors;
10760 10761 char buffer[64];
10761 10762
10762 10763 /* Check for the per adapter vport setting */
10763 10764 (void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst);
10764 10765 cnt = 0;
10765 10766 arrayp = NULL;
10766 10767 rval =
10767 10768 ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10768 10769 (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);
10769 10770
10770 10771 if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10771 10772 /* Check for the global vport setting */
10772 10773 cnt = 0;
10773 10774 arrayp = NULL;
10774 10775 rval =
10775 10776 ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
10776 10777 (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
10777 10778 }
10778 10779
10779 10780 if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
10780 10781 return;
10781 10782 }
10782 10783
10783 10784 for (i = 0; i < cnt; i++) {
10784 10785 errors = 0;
10785 10786 s = (uint8_t *)arrayp[i];
10786 10787
10787 10788 if (!s) {
10788 10789 break;
10789 10790 }
10790 10791
10791 10792 np = (uint8_t *)&pwwpn;
10792 10793 for (j = 0; j < sizeof (NAME_TYPE); j++) {
10793 10794 c1 = *s++;
10794 10795 if ((c1 >= '0') && (c1 <= '9')) {
10795 10796 sum = ((c1 - '0') << 4);
10796 10797 } else if ((c1 >= 'a') && (c1 <= 'f')) {
10797 10798 sum = ((c1 - 'a' + 10) << 4);
10798 10799 } else if ((c1 >= 'A') && (c1 <= 'F')) {
10799 10800 sum = ((c1 - 'A' + 10) << 4);
10800 10801 } else {
10801 10802 EMLXS_MSGF(EMLXS_CONTEXT,
10802 10803 &emlxs_attach_debug_msg,
10803 10804 "Config error: Invalid PWWPN found. "
10804 10805 "entry=%d byte=%d hi_nibble=%c",
10805 10806 i, j, c1);
10806 10807 errors++;
10807 10808 }
10808 10809
10809 10810 c1 = *s++;
10810 10811 if ((c1 >= '0') && (c1 <= '9')) {
10811 10812 sum |= (c1 - '0');
10812 10813 } else if ((c1 >= 'a') && (c1 <= 'f')) {
10813 10814 sum |= (c1 - 'a' + 10);
10814 10815 } else if ((c1 >= 'A') && (c1 <= 'F')) {
10815 10816 sum |= (c1 - 'A' + 10);
10816 10817 } else {
10817 10818 EMLXS_MSGF(EMLXS_CONTEXT,
10818 10819 &emlxs_attach_debug_msg,
10819 10820 "Config error: Invalid PWWPN found. "
10820 10821 "entry=%d byte=%d lo_nibble=%c",
10821 10822 i, j, c1);
10822 10823 errors++;
10823 10824 }
10824 10825
10825 10826 *np++ = (uint8_t)sum;
10826 10827 }
10827 10828
10828 10829 if (*s++ != ':') {
10829 10830 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10830 10831 "Config error: Invalid delimiter after PWWPN. "
10831 10832 "entry=%d", i);
10832 10833 goto out;
10833 10834 }
10834 10835
10835 10836 np = (uint8_t *)&wwnn;
10836 10837 for (j = 0; j < sizeof (NAME_TYPE); j++) {
10837 10838 c1 = *s++;
10838 10839 if ((c1 >= '0') && (c1 <= '9')) {
10839 10840 sum = ((c1 - '0') << 4);
10840 10841 } else if ((c1 >= 'a') && (c1 <= 'f')) {
10841 10842 sum = ((c1 - 'a' + 10) << 4);
10842 10843 } else if ((c1 >= 'A') && (c1 <= 'F')) {
10843 10844 sum = ((c1 - 'A' + 10) << 4);
10844 10845 } else {
10845 10846 EMLXS_MSGF(EMLXS_CONTEXT,
10846 10847 &emlxs_attach_debug_msg,
10847 10848 "Config error: Invalid WWNN found. "
10848 10849 "entry=%d byte=%d hi_nibble=%c",
10849 10850 i, j, c1);
10850 10851 errors++;
10851 10852 }
10852 10853
10853 10854 c1 = *s++;
10854 10855 if ((c1 >= '0') && (c1 <= '9')) {
10855 10856 sum |= (c1 - '0');
10856 10857 } else if ((c1 >= 'a') && (c1 <= 'f')) {
10857 10858 sum |= (c1 - 'a' + 10);
10858 10859 } else if ((c1 >= 'A') && (c1 <= 'F')) {
10859 10860 sum |= (c1 - 'A' + 10);
10860 10861 } else {
10861 10862 EMLXS_MSGF(EMLXS_CONTEXT,
10862 10863 &emlxs_attach_debug_msg,
10863 10864 "Config error: Invalid WWNN found. "
10864 10865 "entry=%d byte=%d lo_nibble=%c",
10865 10866 i, j, c1);
10866 10867 errors++;
10867 10868 }
10868 10869
10869 10870 *np++ = (uint8_t)sum;
10870 10871 }
10871 10872
10872 10873 if (*s++ != ':') {
10873 10874 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10874 10875 "Config error: Invalid delimiter after WWNN. "
10875 10876 "entry=%d", i);
10876 10877 goto out;
10877 10878 }
10878 10879
10879 10880 np = (uint8_t *)&wwpn;
10880 10881 for (j = 0; j < sizeof (NAME_TYPE); j++) {
10881 10882 c1 = *s++;
10882 10883 if ((c1 >= '0') && (c1 <= '9')) {
10883 10884 sum = ((c1 - '0') << 4);
10884 10885 } else if ((c1 >= 'a') && (c1 <= 'f')) {
10885 10886 sum = ((c1 - 'a' + 10) << 4);
10886 10887 } else if ((c1 >= 'A') && (c1 <= 'F')) {
10887 10888 sum = ((c1 - 'A' + 10) << 4);
10888 10889 } else {
10889 10890 EMLXS_MSGF(EMLXS_CONTEXT,
10890 10891 &emlxs_attach_debug_msg,
10891 10892 "Config error: Invalid WWPN found. "
10892 10893 "entry=%d byte=%d hi_nibble=%c",
10893 10894 i, j, c1);
10894 10895
10895 10896 errors++;
10896 10897 }
10897 10898
10898 10899 c1 = *s++;
10899 10900 if ((c1 >= '0') && (c1 <= '9')) {
10900 10901 sum |= (c1 - '0');
10901 10902 } else if ((c1 >= 'a') && (c1 <= 'f')) {
10902 10903 sum |= (c1 - 'a' + 10);
10903 10904 } else if ((c1 >= 'A') && (c1 <= 'F')) {
10904 10905 sum |= (c1 - 'A' + 10);
10905 10906 } else {
10906 10907 EMLXS_MSGF(EMLXS_CONTEXT,
10907 10908 &emlxs_attach_debug_msg,
10908 10909 "Config error: Invalid WWPN found. "
10909 10910 "entry=%d byte=%d lo_nibble=%c",
10910 10911 i, j, c1);
10911 10912
10912 10913 errors++;
10913 10914 }
10914 10915
10915 10916 *np++ = (uint8_t)sum;
10916 10917 }
10917 10918
10918 10919 if (*s++ != ':') {
10919 10920 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
10920 10921 "Config error: Invalid delimiter after WWPN. "
10921 10922 "entry=%d", i);
10922 10923
10923 10924 goto out;
10924 10925 }
10925 10926
10926 10927 sum = 0;
10927 10928 do {
10928 10929 c1 = *s++;
10929 10930 if ((c1 < '0') || (c1 > '9')) {
10930 10931 EMLXS_MSGF(EMLXS_CONTEXT,
10931 10932 &emlxs_attach_debug_msg,
10932 10933 "Config error: Invalid VPI found. "
10933 10934 "entry=%d c=%c vpi=%d", i, c1, sum);
10934 10935
10935 10936 goto out;
10936 10937 }
10937 10938
10938 10939 sum = (sum * 10) + (c1 - '0');
10939 10940
10940 10941 } while (*s != 0);
10941 10942
10942 10943 vpi = sum;
10943 10944
10944 10945 if (errors) {
10945 10946 continue;
10946 10947 }
10947 10948
10948 10949 /* Entry has been read */
10949 10950
10950 10951 /* Check if the physical port wwpn */
10951 10952 /* matches our physical port wwpn */
10952 10953 if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
10953 10954 continue;
10954 10955 }
10955 10956
10956 10957 /* Check vpi range */
10957 10958 if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
10958 10959 continue;
10959 10960 }
10960 10961
10961 10962 /* Check if port has already been configured */
10962 10963 if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
10963 10964 continue;
10964 10965 }
10965 10966
10966 10967 /* Set the highest configured vpi */
10967 10968 if (vpi > hba->vpi_high) {
10968 10969 hba->vpi_high = vpi;
10969 10970 }
10970 10971
10971 10972 bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
10972 10973 sizeof (NAME_TYPE));
10973 10974 bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
10974 10975 sizeof (NAME_TYPE));
10975 10976
10976 10977 if (hba->port[vpi].snn[0] == 0) {
10977 10978 (void) strncpy((caddr_t)hba->port[vpi].snn,
10978 10979 (caddr_t)hba->snn, 256);
10979 10980 }
10980 10981
10981 10982 if (hba->port[vpi].spn[0] == 0) {
10982 10983 (void) sprintf((caddr_t)hba->port[vpi].spn,
10983 10984 "%s VPort-%d",
10984 10985 (caddr_t)hba->spn, vpi);
10985 10986 }
10986 10987
10987 10988 hba->port[vpi].flag |=
10988 10989 (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);
10989 10990
10990 10991 if (cfg[CFG_VPORT_RESTRICTED].current) {
10991 10992 hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
10992 10993 }
10993 10994 }
10994 10995
10995 10996 out:
10996 10997
10997 10998 (void) ddi_prop_free((void *) arrayp);
10998 10999 return;
10999 11000
11000 11001 } /* emlxs_read_vport_prop() */
11001 11002 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */
11002 11003
11003 11004
11004 11005 extern char *
11005 11006 emlxs_wwn_xlate(char *buffer, uint8_t *wwn)
11006 11007 {
11007 11008 (void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x",
11008 11009 wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
11009 11010 wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);
11010 11011
11011 11012 return (buffer);
11012 11013
11013 11014 } /* emlxs_wwn_xlate() */
11014 11015
11015 11016
11016 11017 /* This is called at port online and offline */
11017 11018 extern void
11018 11019 emlxs_ub_flush(emlxs_port_t *port)
11019 11020 {
11020 11021 emlxs_hba_t *hba = HBA;
11021 11022 fc_unsol_buf_t *ubp;
11022 11023 emlxs_ub_priv_t *ub_priv;
11023 11024 emlxs_ub_priv_t *next;
11024 11025
11025 11026 /* Return if nothing to do */
11026 11027 if (!port->ub_wait_head) {
11027 11028 return;
11028 11029 }
11029 11030
11030 11031 mutex_enter(&EMLXS_PORT_LOCK);
11031 11032 ub_priv = port->ub_wait_head;
11032 11033 port->ub_wait_head = NULL;
11033 11034 port->ub_wait_tail = NULL;
11034 11035 mutex_exit(&EMLXS_PORT_LOCK);
11035 11036
11036 11037 while (ub_priv) {
11037 11038 next = ub_priv->next;
11038 11039 ubp = ub_priv->ubp;
11039 11040
11040 11041 /* Check if ULP is online and we have a callback function */
11041 11042 if ((port->ulp_statec != FC_STATE_OFFLINE) &&
11042 11043 port->ulp_unsol_cb) {
11043 11044 /* Send ULP the ub buffer */
11044 11045 port->ulp_unsol_cb(port->ulp_handle, ubp,
11045 11046 ubp->ub_frame.type);
11046 11047 } else { /* Drop the buffer */
11047 11048
11048 11049 (void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11049 11050 }
11050 11051
11051 11052 ub_priv = next;
11052 11053
11053 11054 } /* while () */
11054 11055
11055 11056 return;
11056 11057
11057 11058 } /* emlxs_ub_flush() */
11058 11059
11059 11060
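/*
 * Deliver an unsolicited buffer to the ULP if it is online; otherwise queue
 * it on the port's ub_wait list (link up) for emlxs_ub_flush(), or release
 * it back to the FCA (link down).
 */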
11060 11061 extern void
11061 11062 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
11062 11063 {
11063 11064 emlxs_hba_t *hba = HBA;
11064 11065 emlxs_ub_priv_t *ub_priv;
11065 11066
11066 11067 ub_priv = ubp->ub_fca_private;
11067 11068
11068 11069 /* Check if ULP is online */
11069 11070 if (port->ulp_statec != FC_STATE_OFFLINE) {
11070 11071 if (port->ulp_unsol_cb) {
11071 11072 port->ulp_unsol_cb(port->ulp_handle, ubp,
11072 11073 ubp->ub_frame.type);
11073 11074 } else {
11074 11075 (void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11075 11076 }
11076 11077
11077 11078 return;
11078 11079 } else { /* ULP offline */
11079 11080
11080 11081 if (hba->state >= FC_LINK_UP) {
11081 11082 /* Add buffer to queue tail */
11082 11083 mutex_enter(&EMLXS_PORT_LOCK);
11083 11084
11084 11085 if (port->ub_wait_tail) {
11085 11086 port->ub_wait_tail->next = ub_priv;
11086 11087 }
11087 11088 port->ub_wait_tail = ub_priv;
11088 11089
11089 11090 if (!port->ub_wait_head) {
11090 11091 port->ub_wait_head = ub_priv;
11091 11092 }
11092 11093
11093 11094 mutex_exit(&EMLXS_PORT_LOCK);
11094 11095 } else {
11095 11096 (void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11096 11097 }
11097 11098 }
11098 11099
11099 11100 return;
11100 11101
11101 11102 } /* emlxs_ub_callback() */
11102 11103
11103 11104
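/*
 * Verify that the compiled sizes of the SLI interface structures (BDEs, HBQ
 * entries, mailbox, PCB, SLIM, IOCB, etc.) match their expected fixed sizes.
 * Returns the number of mismatches found; zero indicates a consistent build.
 */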
11104 11105 static uint32_t
11105 11106 emlxs_integrity_check(emlxs_hba_t *hba)
11106 11107 {
11107 11108 uint32_t size;
11108 11109 uint32_t errors = 0;
11109 11110 int ddiinst = hba->ddiinst;
11110 11111
11111 11112 size = 16;
11112 11113 if (sizeof (ULP_BDL) != size) {
11113 11114 cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect. %d != 16",
11114 11115 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
11115 11116
11116 11117 errors++;
11117 11118 }
11118 11119 size = 8;
11119 11120 if (sizeof (ULP_BDE) != size) {
11120 11121 cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect. %d != 8",
11121 11122 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
11122 11123
11123 11124 errors++;
11124 11125 }
11125 11126 size = 12;
11126 11127 if (sizeof (ULP_BDE64) != size) {
11127 11128 cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect. %d != 12",
11128 11129 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
11129 11130
11130 11131 errors++;
11131 11132 }
11132 11133 size = 16;
11133 11134 if (sizeof (HBQE_t) != size) {
11134 11135 cmn_err(CE_WARN, "?%s%d: HBQE size incorrect. %d != 16",
11135 11136 DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
11136 11137
11137 11138 errors++;
11138 11139 }
11139 11140 size = 8;
11140 11141 if (sizeof (HGP) != size) {
11141 11142 cmn_err(CE_WARN, "?%s%d: HGP size incorrect. %d != 8",
11142 11143 DRIVER_NAME, ddiinst, (int)sizeof (HGP));
11143 11144
11144 11145 errors++;
11145 11146 }
11146 11147 if (sizeof (PGP) != size) {
11147 11148 cmn_err(CE_WARN, "?%s%d: PGP size incorrect. %d != 8",
11148 11149 DRIVER_NAME, ddiinst, (int)sizeof (PGP));
11149 11150
11150 11151 errors++;
11151 11152 }
11152 11153 size = 4;
11153 11154 if (sizeof (WORD5) != size) {
11154 11155 cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect. %d != 4",
11155 11156 DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
11156 11157
11157 11158 errors++;
11158 11159 }
11159 11160 size = 124;
11160 11161 if (sizeof (MAILVARIANTS) != size) {
11161 11162 cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect. "
11162 11163 "%d != 124", DRIVER_NAME, ddiinst,
11163 11164 (int)sizeof (MAILVARIANTS));
11164 11165
11165 11166 errors++;
11166 11167 }
11167 11168 size = 128;
11168 11169 if (sizeof (SLI1_DESC) != size) {
11169 11170 cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect. %d != 128",
11170 11171 DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
11171 11172
11172 11173 errors++;
11173 11174 }
11174 11175 if (sizeof (SLI2_DESC) != size) {
11175 11176 cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect. %d != 128",
11176 11177 DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
11177 11178
11178 11179 errors++;
11179 11180 }
11180 11181 size = MBOX_SIZE;
11181 11182 if (sizeof (MAILBOX) != size) {
11182 11183 cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect. %d != %d",
11183 11184 DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
11184 11185
11185 11186 errors++;
11186 11187 }
11187 11188 size = PCB_SIZE;
11188 11189 if (sizeof (PCB) != size) {
11189 11190 cmn_err(CE_WARN, "?%s%d: PCB size incorrect. %d != %d",
11190 11191 DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
11191 11192
11192 11193 errors++;
11193 11194 }
11194 11195 size = 260;
11195 11196 if (sizeof (ATTRIBUTE_ENTRY) != size) {
11196 11197 cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect. "
11197 11198 "%d != 260", DRIVER_NAME, ddiinst,
11198 11199 (int)sizeof (ATTRIBUTE_ENTRY));
11199 11200
11200 11201 errors++;
11201 11202 }
11202 11203 size = SLI_SLIM1_SIZE;
11203 11204 if (sizeof (SLIM1) != size) {
11204 11205 cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect. %d != %d",
11205 11206 DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
11206 11207
11207 11208 errors++;
11208 11209 }
11209 11210 size = SLI3_IOCB_CMD_SIZE;
11210 11211 if (sizeof (IOCB) != size) {
11211 11212 cmn_err(CE_WARN, "?%s%d: IOCB size incorrect. %d != %d",
11212 11213 DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
11213 11214 SLI3_IOCB_CMD_SIZE);
11214 11215
11215 11216 errors++;
11216 11217 }
11217 11218
11218 11219 size = SLI_SLIM2_SIZE;
11219 11220 if (sizeof (SLIM2) != size) {
11220 11221 cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect. %d != %d",
11221 11222 DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
11222 11223 SLI_SLIM2_SIZE);
11223 11224
11224 11225 errors++;
11225 11226 }
11226 11227 return (errors);
11227 11228
11228 11229 } /* emlxs_integrity_check() */
11229 11230
11230 11231
11231 11232 #ifdef FMA_SUPPORT
11232 11233 /*
11233 11234 * FMA support
11234 11235 */
11235 11236
11236 11237 extern void
11237 11238 emlxs_fm_init(emlxs_hba_t *hba)
11238 11239 {
11239 11240 ddi_iblock_cookie_t iblk;
11240 11241
11241 11242 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
11242 11243 return;
11243 11244 }
11244 11245
11245 11246 if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
11246 11247 emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
11247 11248 emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
11248 11249 }
11249 11250
11250 11251 if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
11251 11252 hba->dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
11252 11253 hba->dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR;
11253 11254 hba->dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR;
11254 11255 hba->dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR;
11255 11256 } else {
11256 11257 hba->dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11257 11258 hba->dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11258 11259 hba->dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11259 11260 hba->dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR;
11260 11261 }
11261 11262
11262 11263 ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);
11263 11264
11264 11265 if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
11265 11266 DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11266 11267 pci_ereport_setup(hba->dip);
11267 11268 }
11268 11269
11269 11270 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11270 11271 ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb,
11271 11272 (void *)hba);
11272 11273 }
11273 11274
11274 11275 } /* emlxs_fm_init() */
11275 11276
11276 11277
11277 11278 extern void
11278 11279 emlxs_fm_fini(emlxs_hba_t *hba)
11279 11280 {
11280 11281 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
11281 11282 return;
11282 11283 }
11283 11284
11284 11285 if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
11285 11286 DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11286 11287 pci_ereport_teardown(hba->dip);
11287 11288 }
11288 11289
11289 11290 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
11290 11291 ddi_fm_handler_unregister(hba->dip);
11291 11292 }
11292 11293
11293 11294 (void) ddi_fm_fini(hba->dip);
11294 11295
11295 11296 } /* emlxs_fm_fini() */
11296 11297
11297 11298
11298 11299 extern int
11299 11300 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle)
11300 11301 {
11301 11302 ddi_fm_error_t err;
11302 11303
11303 11304 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
11304 11305 return (DDI_FM_OK);
11305 11306 }
11306 11307
11307 11308 /* Some S10 versions do not define the ahi_err structure */
11308 11309 if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) {
11309 11310 return (DDI_FM_OK);
11310 11311 }
11311 11312
11312 11313 err.fme_status = DDI_FM_OK;
11313 11314 (void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
11314 11315
11315 11316 /* Some S10 versions do not define the ddi_fm_acc_err_clear function */
11316 11317 if ((void *)&ddi_fm_acc_err_clear != NULL) {
11317 11318 (void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
11318 11319 }
11319 11320
11320 11321 return (err.fme_status);
11321 11322
11322 11323 } /* emlxs_fm_check_acc_handle() */
11323 11324
11324 11325
11325 11326 extern int
11326 11327 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle)
11327 11328 {
11328 11329 ddi_fm_error_t err;
11329 11330
11330 11331 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
11331 11332 return (DDI_FM_OK);
11332 11333 }
11333 11334
11334 11335 err.fme_status = DDI_FM_OK;
11335 11336 (void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
11336 11337
11337 11338 return (err.fme_status);
11338 11339
11339 11340 } /* emlxs_fm_check_dma_handle() */
11340 11341
11341 11342
11342 11343 extern void
11343 11344 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
11344 11345 {
11345 11346 uint64_t ena;
11346 11347 char buf[FM_MAX_CLASS];
11347 11348
11348 11349 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
11349 11350 return;
11350 11351 }
11351 11352
11352 11353 if (detail == NULL) {
11353 11354 return;
11354 11355 }
11355 11356
11356 11357 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
11357 11358 ena = fm_ena_generate(0, FM_ENA_FMT1);
11358 11359
11359 11360 ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
11360 11361 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
11361 11362
11362 11363 } /* emlxs_fm_ereport() */
11363 11364
11364 11365
11365 11366 extern void
11366 11367 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact)
11367 11368 {
11368 11369 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
11369 11370 return;
11370 11371 }
11371 11372
11372 11373 if (impact == NULL) {
11373 11374 return;
11374 11375 }
11375 11376
11376 11377 if ((hba->pm_state & EMLXS_PM_IN_DETACH) &&
11377 11378 (impact == DDI_SERVICE_DEGRADED)) {
11378 11379 impact = DDI_SERVICE_UNAFFECTED;
11379 11380 }
11380 11381
11381 11382 ddi_fm_service_impact(hba->dip, impact);
11382 11383
11383 11384 return;
11384 11385
11385 11386 } /* emlxs_fm_service_impact() */
11386 11387
11387 11388
11388 11389 /*
11389 11390 * The I/O fault service error handling callback function
11390 11391 */
11391 11392 /*ARGSUSED*/
11392 11393 extern int
11393 11394 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
11394 11395 const void *impl_data)
11395 11396 {
11396 11397 /*
11397 11398 * as the driver can always deal with an error
11398 11399 * in any dma or access handle, we can just return
11399 11400 * the fme_status value.
11400 11401 */
11401 11402 pci_ereport_post(dip, err, NULL);
11402 11403 return (err->fme_status);
11403 11404
11404 11405 } /* emlxs_fm_error_cb() */
11405 11406
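/*
 * Check the SLIM2 and packet (cmd/resp/data) DMA handles for FMA errors.
 * Any failure marks the packet FC_PKT_TRAN_ERROR/FC_REASON_DMA_ERROR; a
 * SLIM2 failure additionally latches FC_DMA_CHECK_ERROR on the HBA.
 */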
11406 11407 extern void
11407 11408 emlxs_check_dma(emlxs_hba_t *hba, emlxs_buf_t *sbp)
11408 11409 {
11409 11410 emlxs_port_t *port = sbp->port;
11410 11411 fc_packet_t *pkt = PRIV2PKT(sbp);
11411 11412
11412 11413 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
11413 11414 if (emlxs_fm_check_dma_handle(hba,
11414 11415 hba->sli.sli4.slim2.dma_handle)
11415 11416 != DDI_FM_OK) {
11416 11417 EMLXS_MSGF(EMLXS_CONTEXT,
11417 11418 &emlxs_invalid_dma_handle_msg,
11418 11419 "slim2: hdl=%p",
11419 11420 hba->sli.sli4.slim2.dma_handle);
11420 11421
11421 11422 mutex_enter(&EMLXS_PORT_LOCK);
11422 11423 hba->flag |= FC_DMA_CHECK_ERROR;
11423 11424 mutex_exit(&EMLXS_PORT_LOCK);
11424 11425 }
11425 11426 } else {
11426 11427 if (emlxs_fm_check_dma_handle(hba,
11427 11428 hba->sli.sli3.slim2.dma_handle)
11428 11429 != DDI_FM_OK) {
11429 11430 EMLXS_MSGF(EMLXS_CONTEXT,
11430 11431 &emlxs_invalid_dma_handle_msg,
11431 11432 "slim2: hdl=%p",
11432 11433 hba->sli.sli3.slim2.dma_handle);
11433 11434
11434 11435 mutex_enter(&EMLXS_PORT_LOCK);
11435 11436 hba->flag |= FC_DMA_CHECK_ERROR;
11436 11437 mutex_exit(&EMLXS_PORT_LOCK);
11437 11438 }
11438 11439 }
11439 11440
11440 11441 if (hba->flag & FC_DMA_CHECK_ERROR) {
11441 11442 pkt->pkt_state = FC_PKT_TRAN_ERROR;
11442 11443 pkt->pkt_reason = FC_REASON_DMA_ERROR;
11443 11444 pkt->pkt_expln = FC_EXPLN_NONE;
11444 11445 pkt->pkt_action = FC_ACTION_RETRYABLE;
11445 11446 return;
11446 11447 }
11447 11448
11448 11449 if (pkt->pkt_cmdlen) {
11449 11450 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_cmd_dma)
11450 11451 != DDI_FM_OK) {
11451 11452 EMLXS_MSGF(EMLXS_CONTEXT,
11452 11453 &emlxs_invalid_dma_handle_msg,
11453 11454 "pkt_cmd_dma: hdl=%p",
11454 11455 pkt->pkt_cmd_dma);
11455 11456
11456 11457 pkt->pkt_state = FC_PKT_TRAN_ERROR;
11457 11458 pkt->pkt_reason = FC_REASON_DMA_ERROR;
11458 11459 pkt->pkt_expln = FC_EXPLN_NONE;
11459 11460 pkt->pkt_action = FC_ACTION_RETRYABLE;
11460 11461
11461 11462 return;
11462 11463 }
11463 11464 }
11464 11465
11465 11466 if (pkt->pkt_rsplen) {
11466 11467 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_resp_dma)
11467 11468 != DDI_FM_OK) {
11468 11469 EMLXS_MSGF(EMLXS_CONTEXT,
11469 11470 &emlxs_invalid_dma_handle_msg,
11470 11471 "pkt_resp_dma: hdl=%p",
11471 11472 pkt->pkt_resp_dma);
11472 11473
11473 11474 pkt->pkt_state = FC_PKT_TRAN_ERROR;
11474 11475 pkt->pkt_reason = FC_REASON_DMA_ERROR;
11475 11476 pkt->pkt_expln = FC_EXPLN_NONE;
11476 11477 pkt->pkt_action = FC_ACTION_RETRYABLE;
11477 11478
11478 11479 return;
11479 11480 }
11480 11481 }
11481 11482
11482 11483 if (pkt->pkt_datalen) {
11483 11484 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_data_dma)
11484 11485 != DDI_FM_OK) {
11485 11486 EMLXS_MSGF(EMLXS_CONTEXT,
11486 11487 &emlxs_invalid_dma_handle_msg,
11487 11488 "pkt_data_dma: hdl=%p",
11488 11489 pkt->pkt_data_dma);
11489 11490
11490 11491 pkt->pkt_state = FC_PKT_TRAN_ERROR;
11491 11492 pkt->pkt_reason = FC_REASON_DMA_ERROR;
11492 11493 pkt->pkt_expln = FC_EXPLN_NONE;
11493 11494 pkt->pkt_action = FC_ACTION_RETRYABLE;
11494 11495
11495 11496 return;
11496 11497 }
11497 11498 }
11498 11499
11499 11500 return;
11500 11501
11501 11502 } /* emlxs_check_dma() */
11502 11503 #endif /* FMA_SUPPORT */
11503 11504
11504 11505
11505 11506 extern void
11506 11507 emlxs_swap32_buffer(uint8_t *buffer, uint32_t size)
11507 11508 {
11508 11509 uint32_t word;
11509 11510 uint32_t *wptr;
11510 11511 uint32_t i;
11511 11512
11512 11513 wptr = (uint32_t *)buffer;
11513 11514
11514 11515 size += (size%4)? (4-(size%4)):0;
11515 11516 for (i = 0; i < size / 4; i++) {
11516 11517 word = *wptr;
11517 11518 *wptr++ = SWAP32(word);
11518 11519 }
11519 11520
11520 11521 return;
11521 11522
11522 11523 } /* emlxs_swap32_buffer() */
11523 11524
11524 11525
11525 11526 extern void
11526 11527 emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size)
11527 11528 {
11528 11529 uint32_t word;
11529 11530 uint32_t *sptr;
11530 11531 uint32_t *dptr;
11531 11532 uint32_t i;
11532 11533
11533 11534 sptr = (uint32_t *)src;
11534 11535 dptr = (uint32_t *)dst;
11535 11536
11536 11537 size += (size%4)? (4-(size%4)):0;
11537 11538 for (i = 0; i < size / 4; i++) {
11538 11539 word = *sptr++;
11539 11540 *dptr++ = SWAP32(word);
11540 11541 }
11541 11542
11542 11543 return;
11543 11544
11544 11545 } /* emlxs_swap32_bcopy() */