Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/mega_sas/megaraid_sas.c
+++ new/usr/src/uts/common/io/mega_sas/megaraid_sas.c
1 1 /*
2 2 * megaraid_sas.c: source for mega_sas driver
3 3 *
4 4 * MegaRAID device driver for SAS controllers
5 5 * Copyright (c) 2005-2008, LSI Logic Corporation.
6 6 * All rights reserved.
7 7 *
8 8 * Version:
9 9 * Author:
10 10 * Rajesh Prabhakaran<Rajesh.Prabhakaran@lsil.com>
11 11 * Seokmann Ju
12 12 *
13 13 * Redistribution and use in source and binary forms, with or without
14 14 * modification, are permitted provided that the following conditions are met:
15 15 *
16 16 * 1. Redistributions of source code must retain the above copyright notice,
17 17 * this list of conditions and the following disclaimer.
18 18 *
19 19 * 2. Redistributions in binary form must reproduce the above copyright notice,
20 20 * this list of conditions and the following disclaimer in the documentation
21 21 * and/or other materials provided with the distribution.
22 22 *
23 23 * 3. Neither the name of the author nor the names of its contributors may be
24 24 * used to endorse or promote products derived from this software without
25 25 * specific prior written permission.
26 26 *
27 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
30 30 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
31 31 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
32 32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
33 33 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
34 34 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
35 35 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
36 36 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
37 37 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 38 * DAMAGE.
39 39 */
40 40
41 41 /*
42 42 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
43 43 * Use is subject to license terms.
44 44 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
45 45 */
46 46
47 47 #include <sys/types.h>
48 48 #include <sys/param.h>
49 49 #include <sys/file.h>
50 50 #include <sys/errno.h>
51 51 #include <sys/open.h>
52 52 #include <sys/cred.h>
53 53 #include <sys/modctl.h>
54 54 #include <sys/conf.h>
55 55 #include <sys/devops.h>
56 56 #include <sys/cmn_err.h>
57 57 #include <sys/kmem.h>
58 58 #include <sys/stat.h>
59 59 #include <sys/mkdev.h>
60 60 #include <sys/pci.h>
61 61 #include <sys/scsi/scsi.h>
62 62 #include <sys/ddi.h>
63 63 #include <sys/sunddi.h>
64 64 #include <sys/atomic.h>
65 65 #include <sys/signal.h>
66 66
67 67 #include "megaraid_sas.h"
68 68
69 69 /*
70 70 * FMA header files
71 71 */
72 72 #include <sys/ddifm.h>
73 73 #include <sys/fm/protocol.h>
74 74 #include <sys/fm/util.h>
75 75 #include <sys/fm/io/ddi.h>
76 76
77 77 /*
78 78 * Local static data
79 79 */
80 80 static void *megasas_state = NULL;
81 81 static int debug_level_g = CL_ANN;
82 82
83 83 #pragma weak scsi_hba_open
84 84 #pragma weak scsi_hba_close
85 85 #pragma weak scsi_hba_ioctl
86 86
87 87 static ddi_dma_attr_t megasas_generic_dma_attr = {
88 88 DMA_ATTR_V0, /* dma_attr_version */
89 89 0, /* low DMA address range */
90 90 0xFFFFFFFFU, /* high DMA address range */
91 91 0xFFFFFFFFU, /* DMA counter register */
92 92 8, /* DMA address alignment */
93 93 0x07, /* DMA burstsizes */
94 94 1, /* min DMA size */
95 95 0xFFFFFFFFU, /* max DMA size */
96 96 0xFFFFFFFFU, /* segment boundary */
97 97 MEGASAS_MAX_SGE_CNT, /* dma_attr_sglen */
98 98 512, /* granularity of device */
99 99 0 /* bus specific DMA flags */
100 100 };
101 101
102 102 int32_t megasas_max_cap_maxxfer = 0x1000000;
103 103
104 104 /*
105 105 * cb_ops contains base level routines
106 106 */
107 107 static struct cb_ops megasas_cb_ops = {
108 108 megasas_open, /* open */
109 109 megasas_close, /* close */
110 110 nodev, /* strategy */
111 111 nodev, /* print */
112 112 nodev, /* dump */
113 113 nodev, /* read */
114 114 nodev, /* write */
115 115 megasas_ioctl, /* ioctl */
116 116 nodev, /* devmap */
117 117 nodev, /* mmap */
118 118 nodev, /* segmap */
119 119 nochpoll, /* poll */
120 120 nodev, /* cb_prop_op */
121 121 0, /* streamtab */
122 122 D_NEW | D_HOTPLUG, /* cb_flag */
123 123 CB_REV, /* cb_rev */
124 124 nodev, /* cb_aread */
125 125 nodev /* cb_awrite */
126 126 };
127 127
128 128 /*
129 129 * dev_ops contains configuration routines
130 130 */
131 131 static struct dev_ops megasas_ops = {
132 132 DEVO_REV, /* rev, */
133 133 0, /* refcnt */
134 134 megasas_getinfo, /* getinfo */
135 135 nulldev, /* identify */
136 136 nulldev, /* probe */
137 137 megasas_attach, /* attach */
138 138 megasas_detach, /* detach */
139 139 megasas_reset, /* reset */
140 140 &megasas_cb_ops, /* char/block ops */
141 141 NULL, /* bus ops */
142 142 NULL, /* power */
143 143 ddi_quiesce_not_supported, /* devo_quiesce */
144 144 };
145 145
146 146 static struct modldrv modldrv = {
147 147 &mod_driverops, /* module type - driver */
148 148 MEGASAS_VERSION,
149 149 &megasas_ops, /* driver ops */
150 150 };
151 151
152 152 static struct modlinkage modlinkage = {
153 153 MODREV_1, /* ml_rev - must be MODREV_1 */
154 154 &modldrv, /* ml_linkage */
155 155 NULL /* end of driver linkage */
156 156 };
157 157
158 158 static struct ddi_device_acc_attr endian_attr = {
159 159 DDI_DEVICE_ATTR_V1,
160 160 DDI_STRUCTURE_LE_ACC,
161 161 DDI_STRICTORDER_ACC,
162 162 DDI_DEFAULT_ACC
163 163 };
164 164
165 165
166 166 /*
167 167 * ************************************************************************** *
168 168 * *
169 169 * common entry points - for loadable kernel modules *
170 170 * *
171 171 * ************************************************************************** *
172 172 */
173 173
174 174 /*
175 175 * _init - initialize a loadable module
176 176 * @void
177 177 *
178 178 * The driver should perform any one-time resource allocation or data
179 179 * initialization during driver loading in _init(). For example, the driver
180 180 * should initialize any mutexes global to the driver in this routine.
181 181 * The driver should not, however, use _init() to allocate or initialize
182 182 * anything that has to do with a particular instance of the device.
183 183 * Per-instance initialization must be done in attach().
184 184 */
185 185 int
186 186 _init(void)
187 187 {
188 188 int ret;
189 189
190 190 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
191 191
192 192 ret = ddi_soft_state_init(&megasas_state,
193 193 sizeof (struct megasas_instance), 0);
194 194
195 195 if (ret != 0) {
196 196 con_log(CL_ANN, (CE_WARN, "megaraid: could not init state"));
197 197 return (ret);
198 198 }
199 199
200 200 if ((ret = scsi_hba_init(&modlinkage)) != 0) {
201 201 con_log(CL_ANN, (CE_WARN, "megaraid: could not init scsi hba"));
202 202 ddi_soft_state_fini(&megasas_state);
203 203 return (ret);
204 204 }
205 205
206 206 ret = mod_install(&modlinkage);
207 207
208 208 if (ret != 0) {
209 209 con_log(CL_ANN, (CE_WARN, "megaraid: mod_install failed"));
210 210 scsi_hba_fini(&modlinkage);
211 211 ddi_soft_state_fini(&megasas_state);
212 212 }
213 213
214 214 return (ret);
215 215 }
216 216
217 217 /*
218 218 * _info - returns information about a loadable module.
219 219 * @void
220 220 *
221 221 * _info() is called to return module information. This is a typical entry
222 222 * point that does predefined role. It simply calls mod_info().
223 223 */
224 224 int
225 225 _info(struct modinfo *modinfop)
226 226 {
227 227 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
228 228
229 229 return (mod_info(&modlinkage, modinfop));
230 230 }
231 231
232 232 /*
233 233 * _fini - prepare a loadable module for unloading
234 234 * @void
235 235 *
236 236 * In _fini(), the driver should release any resources that were allocated in
237 237 * _init(). The driver must remove itself from the system module list.
238 238 */
239 239 int
240 240 _fini(void)
241 241 {
242 242 int ret;
243 243
244 244 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
245 245
246 246 if ((ret = mod_remove(&modlinkage)) != 0)
247 247 return (ret);
248 248
249 249 scsi_hba_fini(&modlinkage);
250 250
251 251 ddi_soft_state_fini(&megasas_state);
252 252
253 253 return (ret);
254 254 }
255 255
256 256
257 257 /*
258 258 * ************************************************************************** *
259 259 * *
260 260 * common entry points - for autoconfiguration *
261 261 * *
262 262 * ************************************************************************** *
263 263 */
264 264 /*
265 265 * attach - adds a device to the system as part of initialization
266 266 * @dip:
267 267 * @cmd:
268 268 *
269 269 * The kernel calls a driver's attach() entry point to attach an instance of
270 270 * a device (for MegaRAID, it is instance of a controller) or to resume
271 271 * operation for an instance of a device that has been suspended or has been
272 272 * shut down by the power management framework
273 273 * The attach() entry point typically includes the following types of
274 274 * processing:
275 275 * - allocate a soft-state structure for the device instance (for MegaRAID,
276 276 * controller instance)
277 277 * - initialize per-instance mutexes
278 278 * - initialize condition variables
279 279 * - register the device's interrupts (for MegaRAID, controller's interrupts)
280 280 * - map the registers and memory of the device instance (for MegaRAID,
281 281 * controller instance)
282 282 * - create minor device nodes for the device instance (for MegaRAID,
283 283 * controller instance)
284 284 * - report that the device instance (for MegaRAID, controller instance) has
285 285 * attached
286 286 */
287 287 static int
288 288 megasas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
289 289 {
290 290 int instance_no;
291 291 int nregs;
292 292 uint8_t added_isr_f = 0;
293 293 uint8_t added_soft_isr_f = 0;
294 294 uint8_t create_devctl_node_f = 0;
295 295 uint8_t create_scsi_node_f = 0;
296 296 uint8_t create_ioc_node_f = 0;
297 297 uint8_t tran_alloc_f = 0;
298 298 uint8_t irq;
299 299 uint16_t vendor_id;
300 300 uint16_t device_id;
301 301 uint16_t subsysvid;
302 302 uint16_t subsysid;
303 303 uint16_t command;
304 304
305 305 scsi_hba_tran_t *tran;
306 306 ddi_dma_attr_t tran_dma_attr;
307 307 struct megasas_instance *instance;
308 308
309 309 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
310 310
311 311 /* CONSTCOND */
312 312 ASSERT(NO_COMPETING_THREADS);
313 313
314 314 instance_no = ddi_get_instance(dip);
315 315
316 316 /*
317 317 * Since we know that some instantiations of this device can be
318 318 * plugged into slave-only SBus slots, check to see whether this is
319 319 * one such.
320 320 */
321 321 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
322 322 con_log(CL_ANN, (CE_WARN,
323 323 "mega%d: Device in slave-only slot, unused", instance_no));
324 324 return (DDI_FAILURE);
325 325 }
326 326
327 327 switch (cmd) {
328 328 case DDI_ATTACH:
329 329 con_log(CL_DLEVEL1, (CE_NOTE, "megasas: DDI_ATTACH"));
330 330 /* allocate the soft state for the instance */
331 331 if (ddi_soft_state_zalloc(megasas_state, instance_no)
332 332 != DDI_SUCCESS) {
333 333 con_log(CL_ANN, (CE_WARN,
334 334 "mega%d: Failed to allocate soft state",
335 335 instance_no));
336 336
337 337 return (DDI_FAILURE);
338 338 }
339 339
340 340 instance = (struct megasas_instance *)ddi_get_soft_state
341 341 (megasas_state, instance_no);
342 342
343 343 if (instance == NULL) {
344 344 con_log(CL_ANN, (CE_WARN,
345 345 "mega%d: Bad soft state", instance_no));
346 346
347 347 ddi_soft_state_free(megasas_state, instance_no);
348 348
349 349 return (DDI_FAILURE);
350 350 }
351 351
352 352 bzero((caddr_t)instance,
353 353 sizeof (struct megasas_instance));
354 354
355 355 instance->func_ptr = kmem_zalloc(
356 356 sizeof (struct megasas_func_ptr), KM_SLEEP);
357 357 ASSERT(instance->func_ptr);
358 358
359 359 /* Setup the PCI configuration space handles */
360 360 if (pci_config_setup(dip, &instance->pci_handle) !=
361 361 DDI_SUCCESS) {
362 362 con_log(CL_ANN, (CE_WARN,
363 363 "mega%d: pci config setup failed ",
364 364 instance_no));
365 365
366 366 kmem_free(instance->func_ptr,
367 367 sizeof (struct megasas_func_ptr));
368 368 ddi_soft_state_free(megasas_state, instance_no);
369 369
370 370 return (DDI_FAILURE);
371 371 }
372 372
373 373 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
374 374 con_log(CL_ANN, (CE_WARN,
375 375 "megaraid: failed to get registers."));
376 376
377 377 pci_config_teardown(&instance->pci_handle);
378 378 kmem_free(instance->func_ptr,
379 379 sizeof (struct megasas_func_ptr));
380 380 ddi_soft_state_free(megasas_state, instance_no);
381 381
382 382 return (DDI_FAILURE);
383 383 }
384 384
385 385 vendor_id = pci_config_get16(instance->pci_handle,
386 386 PCI_CONF_VENID);
387 387 device_id = pci_config_get16(instance->pci_handle,
388 388 PCI_CONF_DEVID);
389 389
390 390 subsysvid = pci_config_get16(instance->pci_handle,
391 391 PCI_CONF_SUBVENID);
392 392 subsysid = pci_config_get16(instance->pci_handle,
393 393 PCI_CONF_SUBSYSID);
394 394
395 395 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
396 396 (pci_config_get16(instance->pci_handle,
397 397 PCI_CONF_COMM) | PCI_COMM_ME));
398 398 irq = pci_config_get8(instance->pci_handle,
399 399 PCI_CONF_ILINE);
400 400
401 401 con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
402 402 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n",
403 403 instance_no, vendor_id, device_id, subsysvid,
404 404 subsysid, irq, MEGASAS_VERSION));
405 405
406 406 /* enable bus-mastering */
407 407 command = pci_config_get16(instance->pci_handle,
408 408 PCI_CONF_COMM);
409 409
410 410 if (!(command & PCI_COMM_ME)) {
411 411 command |= PCI_COMM_ME;
412 412
413 413 pci_config_put16(instance->pci_handle,
414 414 PCI_CONF_COMM, command);
415 415
416 416 con_log(CL_ANN, (CE_CONT, "megaraid%d: "
417 417 "enable bus-mastering\n", instance_no));
418 418 } else {
419 419 con_log(CL_DLEVEL1, (CE_CONT, "megaraid%d: "
420 420 "bus-mastering already set\n", instance_no));
421 421 }
422 422
423 423 /* initialize function pointers */
424 424 if ((device_id == PCI_DEVICE_ID_LSI_1078) ||
425 425 (device_id == PCI_DEVICE_ID_LSI_1078DE)) {
426 426 con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
427 427 "1078R/DE detected\n", instance_no));
428 428 instance->func_ptr->read_fw_status_reg =
429 429 read_fw_status_reg_ppc;
430 430 instance->func_ptr->issue_cmd = issue_cmd_ppc;
431 431 instance->func_ptr->issue_cmd_in_sync_mode =
432 432 issue_cmd_in_sync_mode_ppc;
433 433 instance->func_ptr->issue_cmd_in_poll_mode =
434 434 issue_cmd_in_poll_mode_ppc;
435 435 instance->func_ptr->enable_intr =
436 436 enable_intr_ppc;
437 437 instance->func_ptr->disable_intr =
438 438 disable_intr_ppc;
439 439 instance->func_ptr->intr_ack = intr_ack_ppc;
440 440 } else {
441 441 con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
442 442 "1064/8R detected\n", instance_no));
443 443 instance->func_ptr->read_fw_status_reg =
444 444 read_fw_status_reg_xscale;
445 445 instance->func_ptr->issue_cmd =
446 446 issue_cmd_xscale;
447 447 instance->func_ptr->issue_cmd_in_sync_mode =
448 448 issue_cmd_in_sync_mode_xscale;
449 449 instance->func_ptr->issue_cmd_in_poll_mode =
450 450 issue_cmd_in_poll_mode_xscale;
451 451 instance->func_ptr->enable_intr =
452 452 enable_intr_xscale;
453 453 instance->func_ptr->disable_intr =
454 454 disable_intr_xscale;
455 455 instance->func_ptr->intr_ack =
456 456 intr_ack_xscale;
457 457 }
458 458
459 459 instance->baseaddress = pci_config_get32(
460 460 instance->pci_handle, PCI_CONF_BASE0);
461 461 instance->baseaddress &= 0x0fffc;
462 462
463 463 instance->dip = dip;
464 464 instance->vendor_id = vendor_id;
465 465 instance->device_id = device_id;
466 466 instance->subsysvid = subsysvid;
467 467 instance->subsysid = subsysid;
468 468
469 469 /* Initialize FMA */
470 470 instance->fm_capabilities = ddi_prop_get_int(
471 471 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
472 472 "fm-capable", DDI_FM_EREPORT_CAPABLE |
473 473 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
474 474 | DDI_FM_ERRCB_CAPABLE);
475 475
476 476 megasas_fm_init(instance);
477 477
478 478 /* setup the mfi based low level driver */
479 479 if (init_mfi(instance) != DDI_SUCCESS) {
480 480 con_log(CL_ANN, (CE_WARN, "megaraid: "
481 481 "could not initialize the low level driver"));
482 482
483 483 goto fail_attach;
484 484 }
485 485
486 486 /*
487 487 * Allocate the interrupt blocking cookie.
488 488 * It represents the information the framework
489 489 * needs to block interrupts. This cookie will
490 490 * be used by the locks shared accross our ISR.
491 491 * These locks must be initialized before we
492 492 * register our ISR.
493 493 * ddi_add_intr(9F)
494 494 */
495 495 if (ddi_get_iblock_cookie(dip, 0,
496 496 &instance->iblock_cookie) != DDI_SUCCESS) {
497 497
498 498 goto fail_attach;
499 499 }
500 500
501 501 if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_HIGH,
502 502 &instance->soft_iblock_cookie) != DDI_SUCCESS) {
503 503
504 504 goto fail_attach;
505 505 }
506 506
507 507 /*
508 508 * Initialize the driver mutexes common to
509 509 * normal/high level isr
510 510 */
511 511 if (ddi_intr_hilevel(dip, 0)) {
512 512 instance->isr_level = HIGH_LEVEL_INTR;
513 513 mutex_init(&instance->cmd_pool_mtx,
514 514 "cmd_pool_mtx", MUTEX_DRIVER,
515 515 instance->soft_iblock_cookie);
516 516 mutex_init(&instance->cmd_pend_mtx,
517 517 "cmd_pend_mtx", MUTEX_DRIVER,
518 518 instance->soft_iblock_cookie);
519 519 } else {
520 520 /*
521 521 * Initialize the driver mutexes
522 522 * specific to soft-isr
523 523 */
524 524 instance->isr_level = NORMAL_LEVEL_INTR;
525 525 mutex_init(&instance->cmd_pool_mtx,
526 526 "cmd_pool_mtx", MUTEX_DRIVER,
527 527 instance->iblock_cookie);
528 528 mutex_init(&instance->cmd_pend_mtx,
529 529 "cmd_pend_mtx", MUTEX_DRIVER,
530 530 instance->iblock_cookie);
531 531 }
532 532
533 533 mutex_init(&instance->completed_pool_mtx,
534 534 "completed_pool_mtx", MUTEX_DRIVER,
535 535 instance->iblock_cookie);
536 536 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
537 537 MUTEX_DRIVER, instance->iblock_cookie);
538 538 mutex_init(&instance->aen_cmd_mtx, "aen_cmd_mtx",
539 539 MUTEX_DRIVER, instance->iblock_cookie);
540 540 mutex_init(&instance->abort_cmd_mtx, "abort_cmd_mtx",
541 541 MUTEX_DRIVER, instance->iblock_cookie);
542 542
543 543 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
544 544 cv_init(&instance->abort_cmd_cv, NULL, CV_DRIVER, NULL);
545 545
546 546 INIT_LIST_HEAD(&instance->completed_pool_list);
547 547
548 548 /* Register our isr. */
549 549 if (ddi_add_intr(dip, 0, NULL, NULL, megasas_isr,
550 550 (caddr_t)instance) != DDI_SUCCESS) {
551 551 con_log(CL_ANN, (CE_WARN,
552 552 " ISR did not register"));
553 553
554 554 goto fail_attach;
555 555 }
556 556
557 557 added_isr_f = 1;
558 558
559 559 /* Register our soft-isr for highlevel interrupts. */
560 560 if (instance->isr_level == HIGH_LEVEL_INTR) {
561 561 if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH,
562 562 &instance->soft_intr_id, NULL, NULL,
563 563 megasas_softintr, (caddr_t)instance) !=
564 564 DDI_SUCCESS) {
565 565 con_log(CL_ANN, (CE_WARN,
566 566 " Software ISR did not register"));
567 567
568 568 goto fail_attach;
569 569 }
570 570
571 571 added_soft_isr_f = 1;
572 572 }
573 573
574 574 /* Allocate a transport structure */
575 575 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
576 576
577 577 if (tran == NULL) {
578 578 con_log(CL_ANN, (CE_WARN,
579 579 "scsi_hba_tran_alloc failed"));
580 580 goto fail_attach;
581 581 }
582 582
583 583 tran_alloc_f = 1;
584 584
585 585 instance->tran = tran;
586 586
587 587 tran->tran_hba_private = instance;
588 588 tran->tran_tgt_private = NULL;
589 589 tran->tran_tgt_init = megasas_tran_tgt_init;
590 590 tran->tran_tgt_probe = scsi_hba_probe;
591 591 tran->tran_tgt_free = (void (*)())NULL;
592 592 tran->tran_init_pkt = megasas_tran_init_pkt;
593 593 tran->tran_start = megasas_tran_start;
594 594 tran->tran_abort = megasas_tran_abort;
595 595 tran->tran_reset = megasas_tran_reset;
596 596 tran->tran_bus_reset = megasas_tran_bus_reset;
597 597 tran->tran_getcap = megasas_tran_getcap;
598 598 tran->tran_setcap = megasas_tran_setcap;
599 599 tran->tran_destroy_pkt = megasas_tran_destroy_pkt;
600 600 tran->tran_dmafree = megasas_tran_dmafree;
601 601 tran->tran_sync_pkt = megasas_tran_sync_pkt;
602 602 tran->tran_reset_notify = NULL;
603 603 tran->tran_quiesce = megasas_tran_quiesce;
604 604 tran->tran_unquiesce = megasas_tran_unquiesce;
605 605
606 606 tran_dma_attr = megasas_generic_dma_attr;
607 607 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
608 608
609 609 /* Attach this instance of the hba */
610 610 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
611 611 != DDI_SUCCESS) {
612 612 con_log(CL_ANN, (CE_WARN,
613 613 "scsi_hba_attach failed\n"));
614 614
615 615 goto fail_attach;
616 616 }
617 617
618 618 /* create devctl node for cfgadm command */
619 619 if (ddi_create_minor_node(dip, "devctl",
620 620 S_IFCHR, INST2DEVCTL(instance_no),
621 621 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
622 622 con_log(CL_ANN, (CE_WARN,
623 623 "megaraid: failed to create devctl node."));
624 624
625 625 goto fail_attach;
626 626 }
627 627
628 628 create_devctl_node_f = 1;
629 629
630 630 /* create scsi node for cfgadm command */
631 631 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
632 632 INST2SCSI(instance_no),
633 633 DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
634 634 DDI_FAILURE) {
635 635 con_log(CL_ANN, (CE_WARN,
636 636 "megaraid: failed to create scsi node."));
637 637
638 638 goto fail_attach;
639 639 }
640 640
641 641 create_scsi_node_f = 1;
642 642
643 643 (void) sprintf(instance->iocnode, "%d:lsirdctl",
644 644 instance_no);
645 645
646 646 /*
647 647 * Create a node for applications
648 648 * for issuing ioctl to the driver.
649 649 */
650 650 if (ddi_create_minor_node(dip, instance->iocnode,
651 651 S_IFCHR, INST2LSIRDCTL(instance_no),
652 652 DDI_PSEUDO, 0) == DDI_FAILURE) {
653 653 con_log(CL_ANN, (CE_WARN,
654 654 "megaraid: failed to create ioctl node."));
655 655
656 656 goto fail_attach;
657 657 }
658 658
659 659 create_ioc_node_f = 1;
660 660
661 661 /* enable interrupt */
662 662 instance->func_ptr->enable_intr(instance);
663 663
664 664 /* initiate AEN */
665 665 if (start_mfi_aen(instance)) {
666 666 con_log(CL_ANN, (CE_WARN,
667 667 "megaraid: failed to initiate AEN."));
668 668 goto fail_initiate_aen;
669 669 }
670 670
671 671 con_log(CL_DLEVEL1, (CE_NOTE,
672 672 "AEN started for instance %d.", instance_no));
673 673
674 674 /* Finally! We are on the air. */
675 675 ddi_report_dev(dip);
676 676
677 677 if (megasas_check_acc_handle(instance->regmap_handle) !=
678 678 DDI_SUCCESS) {
679 679 goto fail_attach;
680 680 }
681 681 if (megasas_check_acc_handle(instance->pci_handle) !=
682 682 DDI_SUCCESS) {
683 683 goto fail_attach;
684 684 }
685 685 break;
686 686 case DDI_PM_RESUME:
687 687 con_log(CL_ANN, (CE_NOTE,
688 688 "megasas: DDI_PM_RESUME"));
689 689 break;
690 690 case DDI_RESUME:
691 691 con_log(CL_ANN, (CE_NOTE,
692 692 "megasas: DDI_RESUME"));
693 693 break;
694 694 default:
695 695 con_log(CL_ANN, (CE_WARN,
696 696 "megasas: invalid attach cmd=%x", cmd));
697 697 return (DDI_FAILURE);
698 698 }
699 699
700 700 return (DDI_SUCCESS);
701 701
702 702 fail_initiate_aen:
703 703 fail_attach:
704 704 if (create_devctl_node_f) {
705 705 ddi_remove_minor_node(dip, "devctl");
706 706 }
707 707
708 708 if (create_scsi_node_f) {
709 709 ddi_remove_minor_node(dip, "scsi");
710 710 }
711 711
712 712 if (create_ioc_node_f) {
713 713 ddi_remove_minor_node(dip, instance->iocnode);
714 714 }
715 715
716 716 if (tran_alloc_f) {
717 717 scsi_hba_tran_free(tran);
718 718 }
719 719
720 720
721 721 if (added_soft_isr_f) {
722 722 ddi_remove_softintr(instance->soft_intr_id);
723 723 }
724 724
725 725 if (added_isr_f) {
726 726 ddi_remove_intr(dip, 0, instance->iblock_cookie);
727 727 }
728 728
729 729 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
730 730 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
731 731
732 732 megasas_fm_fini(instance);
733 733
734 734 pci_config_teardown(&instance->pci_handle);
735 735
736 736 ddi_soft_state_free(megasas_state, instance_no);
737 737
738 738 con_log(CL_ANN, (CE_NOTE,
739 739 "megasas: return failure from mega_attach\n"));
740 740
741 741 return (DDI_FAILURE);
742 742 }
743 743
744 744 /*
745 745 * getinfo - gets device information
746 746 * @dip:
747 747 * @cmd:
748 748 * @arg:
749 749 * @resultp:
750 750 *
751 751 * The system calls getinfo() to obtain configuration information that only
752 752 * the driver knows. The mapping of minor numbers to device instance is
753 753 * entirely under the control of the driver. The system sometimes needs to ask
754 754 * the driver which device a particular dev_t represents.
755 755 * Given the device number return the devinfo pointer from the scsi_device
756 756 * structure.
757 757 */
758 758 /*ARGSUSED*/
759 759 static int
760 760 megasas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
761 761 {
762 762 int rval;
763 763 int megasas_minor = getminor((dev_t)arg);
764 764
765 765 struct megasas_instance *instance;
766 766
767 767 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
768 768
769 769 switch (cmd) {
770 770 case DDI_INFO_DEVT2DEVINFO:
771 771 instance = (struct megasas_instance *)
772 772 ddi_get_soft_state(megasas_state,
773 773 MINOR2INST(megasas_minor));
774 774
775 775 if (instance == NULL) {
776 776 *resultp = NULL;
777 777 rval = DDI_FAILURE;
778 778 } else {
779 779 *resultp = instance->dip;
780 780 rval = DDI_SUCCESS;
781 781 }
782 782 break;
783 783 case DDI_INFO_DEVT2INSTANCE:
784 784 *resultp = (void *)instance;
785 785 rval = DDI_SUCCESS;
786 786 break;
787 787 default:
788 788 *resultp = NULL;
789 789 rval = DDI_FAILURE;
790 790 }
791 791
792 792 return (rval);
793 793 }
794 794
795 795 /*
796 796 * detach - detaches a device from the system
797 797 * @dip: pointer to the device's dev_info structure
798 798 * @cmd: type of detach
799 799 *
800 800 * A driver's detach() entry point is called to detach an instance of a device
801 801 * that is bound to the driver. The entry point is called with the instance of
802 802 * the device node to be detached and with DDI_DETACH, which is specified as
803 803 * the cmd argument to the entry point.
804 804 * This routine is called during driver unload. We free all the allocated
805 805 * resources and call the corresponding LLD so that it can also release all
806 806 * its resources.
807 807 */
static int
megasas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int	instance_no;

	struct megasas_instance	*instance;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	/* recover the soft state allocated by megasas_attach() */
	instance = (struct megasas_instance *)ddi_get_soft_state(megasas_state,
	    instance_no);

	if (!instance) {
		con_log(CL_ANN, (CE_WARN,
		    "megasas:%d could not get instance in detach",
		    instance_no));

		return (DDI_FAILURE);
	}

	con_log(CL_ANN, (CE_NOTE,
	    "megasas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x\n",
	    instance_no, instance->vendor_id, instance->device_id,
	    instance->subsysvid, instance->subsysid));

	switch (cmd) {
	case DDI_DETACH:
		con_log(CL_ANN, (CE_NOTE,
		    "megasas_detach: DDI_DETACH\n"));

		/*
		 * Tear down in roughly the reverse order of attach:
		 * SCSA registration first, then the transport, AEN,
		 * interrupts, MFI resources, FMA, PCI handle, and
		 * finally the soft state itself.
		 */
		if (scsi_hba_detach(dip) != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN,
			    "megasas:%d failed to detach",
			    instance_no));

			return (DDI_FAILURE);
		}

		scsi_hba_tran_free(instance->tran);

		/*
		 * NOTE(review): failing here returns DDI_FAILURE after
		 * the tran has already been freed and the HBA detached,
		 * leaving the instance half torn down -- confirm the
		 * framework never retries detach on this instance.
		 */
		if (abort_aen_cmd(instance, instance->aen_cmd)) {
			con_log(CL_ANN, (CE_WARN, "megasas_detach: "
			    "failed to abort prevous AEN command\n"));

			return (DDI_FAILURE);
		}

		/* quiesce the hardware before removing our handlers */
		instance->func_ptr->disable_intr(instance);

		if (instance->isr_level == HIGH_LEVEL_INTR) {
			ddi_remove_softintr(instance->soft_intr_id);
		}

		ddi_remove_intr(dip, 0, instance->iblock_cookie);

		free_space_for_mfi(instance);

		megasas_fm_fini(instance);

		pci_config_teardown(&instance->pci_handle);

		/* release the per-chip ops table allocated in attach */
		kmem_free(instance->func_ptr,
		    sizeof (struct megasas_func_ptr));

		ddi_soft_state_free(megasas_state, instance_no);
		break;
	case DDI_PM_SUSPEND:
		/* power-management suspend: nothing to do here */
		con_log(CL_ANN, (CE_NOTE,
		    "megasas_detach: DDI_PM_SUSPEND\n"));

		break;
	case DDI_SUSPEND:
		/* system suspend: nothing to do here */
		con_log(CL_ANN, (CE_NOTE,
		    "megasas_detach: DDI_SUSPEND\n"));

		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "invalid detach command:0x%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
897 897
898 898 /*
899 899 * ************************************************************************** *
900 900 * *
901 901 * common entry points - for character driver types *
902 902 * *
903 903 * ************************************************************************** *
904 904 */
905 905 /*
906 906 * open - gets access to a device
907 907 * @dev:
908 908 * @openflags:
909 909 * @otyp:
910 910 * @credp:
911 911 *
912 912 * Access to a device by one or more application programs is controlled
913 913 * through the open() and close() entry points. The primary function of
914 914 * open() is to verify that the open request is allowed.
915 915 */
916 916 static int
917 917 megasas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
918 918 {
919 919 int rval = 0;
920 920
921 921 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
922 922
923 923 /* Check root permissions */
924 924 if (drv_priv(credp) != 0) {
925 925 con_log(CL_ANN, (CE_WARN,
926 926 "megaraid: Non-root ioctl access tried!"));
927 927 return (EPERM);
928 928 }
929 929
930 930 /* Verify we are being opened as a character device */
931 931 if (otyp != OTYP_CHR) {
932 932 con_log(CL_ANN, (CE_WARN,
933 933 "megaraid: ioctl node must be a char node\n"));
934 934 return (EINVAL);
935 935 }
936 936
937 937 if (ddi_get_soft_state(megasas_state, MINOR2INST(getminor(*dev)))
938 938 == NULL) {
939 939 return (ENXIO);
940 940 }
941 941
942 942 if (scsi_hba_open) {
943 943 rval = scsi_hba_open(dev, openflags, otyp, credp);
944 944 }
945 945
946 946 return (rval);
947 947 }
948 948
949 949 /*
950 950 * close - gives up access to a device
951 951 * @dev:
952 952 * @openflags:
953 953 * @otyp:
954 954 * @credp:
955 955 *
956 956 * close() should perform any cleanup necessary to finish using the minor
957 957 * device, and prepare the device (and driver) to be opened again.
958 958 */
959 959 static int
960 960 megasas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
961 961 {
962 962 int rval = 0;
963 963
964 964 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
965 965
966 966 /* no need for locks! */
967 967
968 968 if (scsi_hba_close) {
969 969 rval = scsi_hba_close(dev, openflags, otyp, credp);
970 970 }
971 971
972 972 return (rval);
973 973 }
974 974
975 975 /*
976 976 * ioctl - performs a range of I/O commands for character drivers
977 977 * @dev:
978 978 * @cmd:
979 979 * @arg:
980 980 * @mode:
981 981 * @credp:
982 982 * @rvalp:
983 983 *
984 984 * ioctl() routine must make sure that user data is copied into or out of the
985 985 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
986 986 * and ddi_copyout(), as appropriate.
987 987 * This is a wrapper routine to serialize access to the actual ioctl routine.
988 988 * ioctl() should return 0 on success, or the appropriate error number. The
989 989 * driver may also set the value returned to the calling process through rvalp.
990 990 */
991 991 static int
992 992 megasas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
993 993 int *rvalp)
994 994 {
995 995 int rval = 0;
996 996
997 997 struct megasas_instance *instance;
998 998 struct megasas_ioctl ioctl;
999 999 struct megasas_aen aen;
1000 1000
1001 1001 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1002 1002
1003 1003 instance = ddi_get_soft_state(megasas_state, MINOR2INST(getminor(dev)));
1004 1004
1005 1005 if (instance == NULL) {
1006 1006 /* invalid minor number */
1007 1007 con_log(CL_ANN, (CE_WARN, "megaraid: adapter not found."));
1008 1008 return (ENXIO);
1009 1009 }
1010 1010
1011 1011 switch ((uint_t)cmd) {
1012 1012 case MEGASAS_IOCTL_FIRMWARE:
1013 1013 if (ddi_copyin((void *) arg, &ioctl,
1014 1014 sizeof (struct megasas_ioctl), mode)) {
1015 1015 con_log(CL_ANN, (CE_WARN, "megasas_ioctl: "
1016 1016 "ERROR IOCTL copyin"));
1017 1017 return (EFAULT);
1018 1018 }
1019 1019
1020 1020 if (ioctl.control_code == MR_DRIVER_IOCTL_COMMON) {
1021 1021 rval = handle_drv_ioctl(instance, &ioctl, mode);
1022 1022 } else {
1023 1023 rval = handle_mfi_ioctl(instance, &ioctl, mode);
1024 1024 }
1025 1025
1026 1026 if (ddi_copyout((void *) &ioctl, (void *)arg,
1027 1027 (sizeof (struct megasas_ioctl) - 1), mode)) {
1028 1028 con_log(CL_ANN, (CE_WARN,
1029 1029 "megasas_ioctl: copy_to_user failed\n"));
1030 1030 rval = 1;
1031 1031 }
1032 1032
1033 1033 break;
1034 1034 case MEGASAS_IOCTL_AEN:
1035 1035 if (ddi_copyin((void *) arg, &aen,
1036 1036 sizeof (struct megasas_aen), mode)) {
1037 1037 con_log(CL_ANN, (CE_WARN,
1038 1038 "megasas_ioctl: ERROR AEN copyin"));
1039 1039 return (EFAULT);
1040 1040 }
1041 1041
1042 1042 rval = handle_mfi_aen(instance, &aen);
1043 1043
1044 1044 if (ddi_copyout((void *) &aen, (void *)arg,
1045 1045 sizeof (struct megasas_aen), mode)) {
1046 1046 con_log(CL_ANN, (CE_WARN,
1047 1047 "megasas_ioctl: copy_to_user failed\n"));
1048 1048 rval = 1;
1049 1049 }
1050 1050
1051 1051 break;
1052 1052 default:
1053 1053 rval = scsi_hba_ioctl(dev, cmd, arg,
1054 1054 mode, credp, rvalp);
1055 1055
1056 1056 con_log(CL_DLEVEL1, (CE_NOTE, "megasas_ioctl: "
1057 1057 "scsi_hba_ioctl called, ret = %x.", rval));
1058 1058 }
1059 1059
1060 1060 return (rval);
1061 1061 }
1062 1062
1063 1063 /*
1064 1064 * ************************************************************************** *
1065 1065 * *
1066 1066 * common entry points - for block driver types *
1067 1067 * *
1068 1068 * ************************************************************************** *
1069 1069 */
1070 1070 /*
1071 1071 * reset - TBD
1072 1072 * @dip:
1073 1073 * @cmd:
1074 1074 *
1075 1075 * TBD
1076 1076 */
1077 1077 /*ARGSUSED*/
1078 1078 static int
1079 1079 megasas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1080 1080 {
1081 1081 int instance_no;
1082 1082
1083 1083 struct megasas_instance *instance;
1084 1084
1085 1085 instance_no = ddi_get_instance(dip);
1086 1086 instance = (struct megasas_instance *)ddi_get_soft_state
1087 1087 (megasas_state, instance_no);
1088 1088
1089 1089 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1090 1090
1091 1091 if (!instance) {
1092 1092 con_log(CL_ANN, (CE_WARN,
1093 1093 "megaraid:%d could not get adapter in reset",
1094 1094 instance_no));
1095 1095 return (DDI_FAILURE);
1096 1096 }
1097 1097
1098 1098 con_log(CL_ANN, (CE_NOTE, "flushing cache for instance %d ..",
1099 1099 instance_no));
1100 1100
1101 1101 flush_cache(instance);
1102 1102
1103 1103 return (DDI_SUCCESS);
1104 1104 }
1105 1105
1106 1106
1107 1107 /*
1108 1108 * ************************************************************************** *
1109 1109 * *
1110 1110 * entry points (SCSI HBA) *
1111 1111 * *
1112 1112 * ************************************************************************** *
1113 1113 */
1114 1114 /*
1115 1115 * tran_tgt_init - initialize a target device instance
1116 1116 * @hba_dip:
1117 1117 * @tgt_dip:
1118 1118 * @tran:
1119 1119 * @sd:
1120 1120 *
1121 1121 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1122 1122 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1123 1123 * the device's address as valid and supportable for that particular HBA.
1124 1124 * By returning DDI_FAILURE, the instance of the target driver for that device
1125 1125 * is not probed or attached.
1126 1126 */
1127 1127 /*ARGSUSED*/
1128 1128 static int
1129 1129 megasas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1130 1130 scsi_hba_tran_t *tran, struct scsi_device *sd)
1131 1131 {
1132 1132 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1133 1133
1134 1134 return (DDI_SUCCESS);
1135 1135 }
1136 1136
1137 1137 /*
1138 1138 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1139 1139 * @ap:
1140 1140 * @pkt:
1141 1141 * @bp:
1142 1142 * @cmdlen:
1143 1143 * @statuslen:
1144 1144 * @tgtlen:
1145 1145 * @flags:
1146 1146 * @callback:
1147 1147 *
1148 1148 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1149 1149 * structure and DMA resources for a target driver request. The
1150 1150 * tran_init_pkt() entry point is called when the target driver calls the
1151 1151 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1152 1152 * is a request to perform one or more of three possible services:
1153 1153 * - allocation and initialization of a scsi_pkt structure
1154 1154 * - allocation of DMA resources for data transfer
1155 1155 * - reallocation of DMA resources for the next portion of the data transfer
1156 1156 */
static struct scsi_pkt *
megasas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
	struct buf *bp, int cmdlen, int statuslen, int tgtlen,
	int flags, int (*callback)(), caddr_t arg)
{
	struct scsa_cmd	*acmd;
	struct megasas_instance	*instance;
	struct scsi_pkt	*new_pkt;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance = ADDR2MEGA(ap);

	/* step #1 : pkt allocation */
	if (pkt == NULL) {
		/* no pkt supplied: allocate it with an embedded scsa_cmd */
		pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (struct scsa_cmd), callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		acmd = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		acmd->cmd_pkt = pkt;
		acmd->cmd_flags = 0;
		acmd->cmd_scblen = statuslen;
		acmd->cmd_cdblen = cmdlen;
		acmd->cmd_dmahandle = NULL;
		acmd->cmd_ncookies = 0;
		acmd->cmd_cookie = 0;
		acmd->cmd_cookiecnt = 0;
		acmd->cmd_nwin = 0;

		pkt->pkt_address = *ap;
		pkt->pkt_comp = (void (*)())NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;
		/* remember we allocated this pkt, for the error path below */
		new_pkt = pkt;
	} else {
		/* caller-supplied pkt: must never be freed on failure here */
		acmd = PKT2CMD(pkt);
		new_pkt = NULL;
	}

	/* step #2 : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		if (acmd->cmd_dmahandle == NULL) {
			/* first call for this buffer: bind DMA resources */
			if (megasas_dma_alloc(instance, pkt, bp, flags,
			    callback) == -1) {
				/* free the pkt only if this call created it */
				if (new_pkt) {
					scsi_hba_pkt_free(ap, new_pkt);
				}

				return ((struct scsi_pkt *)NULL);
			}
		} else {
			/* handle already bound: advance to the next window */
			if (megasas_dma_move(instance, pkt, bp) == -1) {
				return ((struct scsi_pkt *)NULL);
			}
		}
	}

	return (pkt);
}
1228 1228
1229 1229 /*
1230 1230 * tran_start - transport a SCSI command to the addressed target
1231 1231 * @ap:
1232 1232 * @pkt:
1233 1233 *
1234 1234 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1235 1235 * SCSI command to the addressed target. The SCSI command is described
1236 1236 * entirely within the scsi_pkt structure, which the target driver allocated
1237 1237 * through the HBA driver's tran_init_pkt() entry point. If the command
1238 1238 * involves a data transfer, DMA resources must also have been allocated for
1239 1239 * the scsi_pkt structure.
1240 1240 *
1241 1241 * Return Values :
1242 1242 * TRAN_BUSY - request queue is full, no more free scbs
1243 1243 * TRAN_ACCEPT - pkt has been submitted to the instance
1244 1244 */
static int
megasas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t	cmd_done = 0;

	struct megasas_instance	*instance = ADDR2MEGA(ap);
	struct megasas_cmd	*cmd;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0]));

	/* optimistically preset completion status; paths below override it */
	pkt->pkt_reason = CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mega_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		/* completed inside build_cmd(): report success right away */
		if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
			scsi_hba_pkt_comp(pkt);
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		return (TRAN_ACCEPT);
	}

	if (cmd == NULL) {
		/* no free command packet available right now */
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		/*
		 * Interrupt-driven path.  NOTE(review): fw_outstanding
		 * is read without a lock here; presumably a stale read
		 * only costs a transient TRAN_BUSY -- confirm.
		 */
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			con_log(CL_ANN, (CE_CONT, "megasas:Firmware busy"));
			return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Syncronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		/* FLAG_NOINTR: issue in polled mode and complete inline */
		struct megasas_header *hdr = &cmd->frame->hdr;

		cmd->sync_cmd = MEGASAS_TRUE;

		instance->func_ptr-> issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_statistics = 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* translate the MFI completion status into SCSA terms */
		switch (hdr->cmd_status) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:

			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;

			/* check condition: sense data should be consulted */
			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
			break;

		default:
			/* anything else is reported as a busy target */
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		/* frame is done with; recycle it before completing the pkt */
		return_mfi_pkt(instance, cmd);
		(void) megasas_common_check(instance, cmd);

		scsi_hba_pkt_comp(pkt);

	}

	return (TRAN_ACCEPT);
}
1336 1336
1337 1337 /*
1338 1338 * tran_abort - Abort any commands that are currently in transport
1339 1339 * @ap:
1340 1340 * @pkt:
1341 1341 *
1342 1342 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
1343 1343 * commands that are currently in transport for a particular target. This entry
1344 1344 * point is called when a target driver calls scsi_abort(). The tran_abort()
1345 1345 * entry point should attempt to abort the command denoted by the pkt
1346 1346 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
1347 1347 * abort all outstanding commands in the transport layer for the particular
1348 1348 * target or logical unit.
1349 1349 */
1350 1350 /*ARGSUSED*/
1351 1351 static int
1352 1352 megasas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1353 1353 {
1354 1354 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1355 1355
1356 1356 /* aborting command not supported by H/W */
1357 1357
1358 1358 return (DDI_FAILURE);
1359 1359 }
1360 1360
1361 1361 /*
1362 1362 * tran_reset - reset either the SCSI bus or target
1363 1363 * @ap:
1364 1364 * @level:
1365 1365 *
1366 1366 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
1367 1367 * the SCSI bus or a particular SCSI target device. This entry point is called
1368 1368 * when a target driver calls scsi_reset(). The tran_reset() entry point must
1369 1369 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
1370 1370 * particular target or logical unit must be reset.
1371 1371 */
1372 1372 /*ARGSUSED*/
1373 1373 static int
1374 1374 megasas_tran_reset(struct scsi_address *ap, int level)
1375 1375 {
1376 1376 struct megasas_instance *instance = ADDR2MEGA(ap);
1377 1377
1378 1378 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1379 1379
1380 1380 if (wait_for_outstanding(instance)) {
1381 1381 return (DDI_FAILURE);
1382 1382 } else {
1383 1383 return (DDI_SUCCESS);
1384 1384 }
1385 1385 }
1386 1386
1387 1387 /*
1388 1388 * tran_bus_reset - reset the SCSI bus
1389 1389 * @dip:
1390 1390 * @level:
1391 1391 *
1392 1392 * The tran_bus_reset() vector in the scsi_hba_tran structure should be
1393 1393 * initialized during the HBA driver's attach(). The vector should point to
1394 1394 * an HBA entry point that is to be called when a user initiates a bus reset.
1395 1395 * Implementation is hardware specific. If the HBA driver cannot reset the
1396 1396 * SCSI bus without affecting the targets, the driver should fail RESET_BUS
1397 1397 * or not initialize this vector.
1398 1398 */
1399 1399 /*ARGSUSED*/
1400 1400 static int
1401 1401 megasas_tran_bus_reset(dev_info_t *dip, int level)
1402 1402 {
1403 1403 int instance_no = ddi_get_instance(dip);
1404 1404
1405 1405 struct megasas_instance *instance = ddi_get_soft_state(megasas_state,
1406 1406 instance_no);
1407 1407
1408 1408 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1409 1409
1410 1410 if (wait_for_outstanding(instance)) {
1411 1411 return (DDI_FAILURE);
1412 1412 } else {
1413 1413 return (DDI_SUCCESS);
1414 1414 }
1415 1415 }
1416 1416
1417 1417 /*
1418 1418 * tran_getcap - get one of a set of SCSA-defined capabilities
1419 1419 * @ap:
1420 1420 * @cap:
1421 1421 * @whom:
1422 1422 *
1423 1423 * The target driver can request the current setting of the capability for a
1424 1424 * particular target by setting the whom parameter to nonzero. A whom value of
1425 1425 * zero indicates a request for the current setting of the general capability
1426 1426 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
1427 1427 * for undefined capabilities or the current value of the requested capability.
1428 1428 */
1429 1429 /*ARGSUSED*/
1430 1430 static int
1431 1431 megasas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
1432 1432 {
1433 1433 int rval = 0;
1434 1434
1435 1435 struct megasas_instance *instance = ADDR2MEGA(ap);
1436 1436
1437 1437 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1438 1438
1439 1439 /* we do allow inquiring about capabilities for other targets */
1440 1440 if (cap == NULL) {
1441 1441 return (-1);
1442 1442 }
1443 1443
1444 1444 switch (scsi_hba_lookup_capstr(cap)) {
1445 1445 case SCSI_CAP_DMA_MAX:
1446 1446 /* Limit to 16MB max transfer */
1447 1447 rval = megasas_max_cap_maxxfer;
1448 1448 break;
1449 1449 case SCSI_CAP_MSG_OUT:
1450 1450 rval = 1;
1451 1451 break;
1452 1452 case SCSI_CAP_DISCONNECT:
1453 1453 rval = 0;
1454 1454 break;
1455 1455 case SCSI_CAP_SYNCHRONOUS:
1456 1456 rval = 0;
1457 1457 break;
1458 1458 case SCSI_CAP_WIDE_XFER:
1459 1459 rval = 1;
1460 1460 break;
1461 1461 case SCSI_CAP_TAGGED_QING:
1462 1462 rval = 1;
1463 1463 break;
1464 1464 case SCSI_CAP_UNTAGGED_QING:
1465 1465 rval = 1;
1466 1466 break;
1467 1467 case SCSI_CAP_PARITY:
1468 1468 rval = 1;
1469 1469 break;
1470 1470 case SCSI_CAP_INITIATOR_ID:
1471 1471 rval = instance->init_id;
1472 1472 break;
1473 1473 case SCSI_CAP_ARQ:
1474 1474 rval = 1;
1475 1475 break;
1476 1476 case SCSI_CAP_LINKED_CMDS:
1477 1477 rval = 0;
1478 1478 break;
1479 1479 case SCSI_CAP_RESET_NOTIFICATION:
1480 1480 rval = 1;
1481 1481 break;
1482 1482 case SCSI_CAP_GEOMETRY:
1483 1483 rval = -1;
1484 1484
1485 1485 break;
1486 1486 default:
1487 1487 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
1488 1488 scsi_hba_lookup_capstr(cap)));
1489 1489 rval = -1;
1490 1490 break;
1491 1491 }
1492 1492
1493 1493 return (rval);
1494 1494 }
1495 1495
1496 1496 /*
1497 1497 * tran_setcap - set one of a set of SCSA-defined capabilities
1498 1498 * @ap:
1499 1499 * @cap:
1500 1500 * @value:
1501 1501 * @whom:
1502 1502 *
1503 1503 * The target driver might request that the new value be set for a particular
1504 1504 * target by setting the whom parameter to nonzero. A whom value of zero
1505 1505 * means that request is to set the new value for the SCSI bus or for adapter
1506 1506 * hardware in general.
1507 1507 * The tran_setcap() should return the following values as appropriate:
1508 1508 * - -1 for undefined capabilities
1509 1509 * - 0 if the HBA driver cannot set the capability to the requested value
1510 1510 * - 1 if the HBA driver is able to set the capability to the requested value
1511 1511 */
1512 1512 /*ARGSUSED*/
1513 1513 static int
1514 1514 megasas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
1515 1515 {
1516 1516 int rval = 1;
1517 1517
1518 1518 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1519 1519
1520 1520 /* We don't allow setting capabilities for other targets */
1521 1521 if (cap == NULL || whom == 0) {
1522 1522 return (-1);
1523 1523 }
1524 1524
1525 1525 switch (scsi_hba_lookup_capstr(cap)) {
1526 1526 case SCSI_CAP_DMA_MAX:
1527 1527 case SCSI_CAP_MSG_OUT:
1528 1528 case SCSI_CAP_PARITY:
1529 1529 case SCSI_CAP_LINKED_CMDS:
1530 1530 case SCSI_CAP_RESET_NOTIFICATION:
1531 1531 case SCSI_CAP_DISCONNECT:
1532 1532 case SCSI_CAP_SYNCHRONOUS:
1533 1533 case SCSI_CAP_UNTAGGED_QING:
1534 1534 case SCSI_CAP_WIDE_XFER:
1535 1535 case SCSI_CAP_INITIATOR_ID:
1536 1536 case SCSI_CAP_ARQ:
1537 1537 /*
1538 1538 * None of these are settable via
1539 1539 * the capability interface.
1540 1540 */
1541 1541 break;
1542 1542 case SCSI_CAP_TAGGED_QING:
1543 1543 rval = 1;
1544 1544 break;
1545 1545 case SCSI_CAP_SECTOR_SIZE:
1546 1546 rval = 1;
1547 1547 break;
1548 1548
1549 1549 case SCSI_CAP_TOTAL_SECTORS:
1550 1550 rval = 1;
1551 1551 break;
1552 1552 default:
1553 1553 rval = -1;
1554 1554 break;
1555 1555 }
1556 1556
1557 1557 return (rval);
1558 1558 }
1559 1559
1560 1560 /*
1561 1561 * tran_destroy_pkt - deallocate scsi_pkt structure
1562 1562 * @ap:
1563 1563 * @pkt:
1564 1564 *
1565 1565 * The tran_destroy_pkt() entry point is the HBA driver function that
1566 1566 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
1567 1567 * called when the target driver calls scsi_destroy_pkt(). The
1568 1568 * tran_destroy_pkt() entry point must free any DMA resources that have been
1569 1569 * allocated for the packet. An implicit DMA synchronization occurs if the
1570 1570 * DMA resources are freed and any cached data remains after the completion
1571 1571 * of the transfer.
1572 1572 */
1573 1573 static void
1574 1574 megasas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1575 1575 {
1576 1576 struct scsa_cmd *acmd = PKT2CMD(pkt);
1577 1577
1578 1578 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1579 1579
1580 1580 if (acmd->cmd_flags & CFLAG_DMAVALID) {
1581 1581 acmd->cmd_flags &= ~CFLAG_DMAVALID;
1582 1582
1583 1583 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
1584 1584
1585 1585 ddi_dma_free_handle(&acmd->cmd_dmahandle);
1586 1586
1587 1587 acmd->cmd_dmahandle = NULL;
1588 1588 }
1589 1589
1590 1590 /* free the pkt */
1591 1591 scsi_hba_pkt_free(ap, pkt);
1592 1592 }
1593 1593
1594 1594 /*
1595 1595 * tran_dmafree - deallocates DMA resources
1596 1596 * @ap:
1597 1597 * @pkt:
1598 1598 *
1599 1599 * The tran_dmafree() entry point deallocates DMAQ resources that have been
1600 1600 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
1601 1601 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
1602 1602 * free only DMA resources allocated for a scsi_pkt structure, not the
1603 1603 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
1604 1604 * implicitly performed.
1605 1605 */
1606 1606 /*ARGSUSED*/
1607 1607 static void
1608 1608 megasas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1609 1609 {
1610 1610 register struct scsa_cmd *acmd = PKT2CMD(pkt);
1611 1611
1612 1612 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1613 1613
1614 1614 if (acmd->cmd_flags & CFLAG_DMAVALID) {
1615 1615 acmd->cmd_flags &= ~CFLAG_DMAVALID;
1616 1616
1617 1617 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
1618 1618
1619 1619 ddi_dma_free_handle(&acmd->cmd_dmahandle);
1620 1620
1621 1621 acmd->cmd_dmahandle = NULL;
1622 1622 }
1623 1623 }
1624 1624
1625 1625 /*
1626 1626 * tran_sync_pkt - synchronize the DMA object allocated
1627 1627 * @ap:
1628 1628 * @pkt:
1629 1629 *
1630 1630 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
1631 1631 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
1632 1632 * entry point is called when the target driver calls scsi_sync_pkt(). If the
1633 1633 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
1634 1634 * must synchronize the CPU's view of the data. If the data transfer direction
1635 1635 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
1636 1636 * device's view of the data.
1637 1637 */
/*ARGSUSED*/
static void
megasas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/*
	 * Intentionally a no-op: the per-I/O ddi_dma_sync() is issued
	 * in the ISR path, so there is nothing left to synchronize
	 * here.  The disabled block below shows what an explicit sync
	 * would look like.
	 */
	/*
	 * following 'ddi_dma_sync()' API call
	 * already called for each I/O in the ISR
	 */
#if 0
	int	i;

	register struct scsa_cmd	*acmd = PKT2CMD(pkt);

	if (acmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
		    acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
	}
#endif
}
1660 1660
1661 1661 /*ARGSUSED*/
1662 1662 static int
1663 1663 megasas_tran_quiesce(dev_info_t *dip)
1664 1664 {
1665 1665 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1666 1666
1667 1667 return (1);
1668 1668 }
1669 1669
1670 1670 /*ARGSUSED*/
1671 1671 static int
1672 1672 megasas_tran_unquiesce(dev_info_t *dip)
1673 1673 {
1674 1674 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1675 1675
1676 1676 return (1);
1677 1677 }
1678 1678
1679 1679 /*
1680 1680 * megasas_isr(caddr_t)
1681 1681 *
1682 1682 * The Interrupt Service Routine
1683 1683 *
1684 1684 * Collect status for all completed commands and do callback
1685 1685 *
1686 1686 */
static uint_t
megasas_isr(struct megasas_instance *instance)
{
	int need_softintr;
	uint32_t producer;
	uint32_t consumer;
	uint32_t context;

	struct megasas_cmd	*cmd;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	ASSERT(instance);
	/* if this adapter did not raise the interrupt, do not claim it */
	if (!instance->func_ptr->intr_ack(instance)) {
		return (DDI_INTR_UNCLAIMED);
	}

	/* pull the latest reply-queue contents over to the CPU */
	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
	    != DDI_SUCCESS) {
		/* FMA: the DMA handle went bad; report and back off */
		megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		return (DDI_INTR_UNCLAIMED);
	}

	producer = *instance->producer;
	consumer = *instance->consumer;

	con_log(CL_ANN1, (CE_CONT, " producer %x consumer %x ",
	    producer, consumer));

	mutex_enter(&instance->completed_pool_mtx);

	/*
	 * Drain the circular reply queue: each entry holds the context
	 * (cmd_list index) of a completed command; move every such
	 * command onto the completed pool for the soft interrupt.
	 */
	while (consumer != producer) {
		context = instance->reply_queue[consumer];
		cmd = instance->cmd_list[context];
		mlist_add_tail(&cmd->list, &instance->completed_pool_list);

		consumer++;
		/* the reply ring has max_fw_cmds + 1 slots; wrap around */
		if (consumer == (instance->max_fw_cmds + 1)) {
			consumer = 0;
		}
	}

	mutex_exit(&instance->completed_pool_mtx);

	/* publish the advanced consumer index back to the firmware */
	*instance->consumer = consumer;
	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * NOTE(review): softint_running is read without a lock;
	 * presumably a spurious extra trigger is harmless -- confirm.
	 */
	if (instance->softint_running) {
		need_softintr = 0;
	} else {
		need_softintr = 1;
	}

	if (instance->isr_level == HIGH_LEVEL_INTR) {
		/* high-level ISR may not block: defer to a soft interrupt */
		if (need_softintr) {
			ddi_trigger_softintr(instance->soft_intr_id);
		}
	} else {
		/*
		 * Not a high-level interrupt, therefore call the soft level
		 * interrupt explicitly
		 */
		(void) megasas_softintr(instance);
	}

	return (DDI_INTR_CLAIMED);
}
1759 1759
1760 1760
1761 1761 /*
1762 1762 * ************************************************************************** *
1763 1763 * *
1764 1764 * libraries *
1765 1765 * *
1766 1766 * ************************************************************************** *
1767 1767 */
1768 1768 /*
1769 1769 * get_mfi_pkt : Get a command from the free pool
1770 1770 */
1771 1771 static struct megasas_cmd *
1772 1772 get_mfi_pkt(struct megasas_instance *instance)
1773 1773 {
1774 1774 mlist_t *head = &instance->cmd_pool_list;
1775 1775 struct megasas_cmd *cmd = NULL;
1776 1776
1777 1777 mutex_enter(&instance->cmd_pool_mtx);
1778 1778 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
1779 1779
1780 1780 if (!mlist_empty(head)) {
1781 1781 cmd = mlist_entry(head->next, struct megasas_cmd, list);
1782 1782 mlist_del_init(head->next);
1783 1783 }
1784 1784 if (cmd != NULL)
1785 1785 cmd->pkt = NULL;
1786 1786 mutex_exit(&instance->cmd_pool_mtx);
1787 1787
1788 1788 return (cmd);
1789 1789 }
1790 1790
1791 1791 /*
1792 1792 * return_mfi_pkt : Return a cmd to free command pool
1793 1793 */
1794 1794 static void
1795 1795 return_mfi_pkt(struct megasas_instance *instance, struct megasas_cmd *cmd)
1796 1796 {
1797 1797 mutex_enter(&instance->cmd_pool_mtx);
1798 1798 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
1799 1799
1800 1800 mlist_add(&cmd->list, &instance->cmd_pool_list);
1801 1801
1802 1802 mutex_exit(&instance->cmd_pool_mtx);
1803 1803 }
1804 1804
1805 1805 /*
1806 1806 * destroy_mfi_frame_pool
1807 1807 */
1808 1808 static void
1809 1809 destroy_mfi_frame_pool(struct megasas_instance *instance)
1810 1810 {
1811 1811 int i;
1812 1812 uint32_t max_cmd = instance->max_fw_cmds;
1813 1813
1814 1814 struct megasas_cmd *cmd;
1815 1815
1816 1816 /* return all frames to pool */
1817 1817 for (i = 0; i < max_cmd; i++) {
1818 1818
1819 1819 cmd = instance->cmd_list[i];
1820 1820
1821 1821 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
1822 1822 (void) mega_free_dma_obj(instance, cmd->frame_dma_obj);
1823 1823
1824 1824 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
1825 1825 }
1826 1826
1827 1827 }
1828 1828
1829 1829 /*
1830 1830 * create_mfi_frame_pool
1831 1831 */
static int
create_mfi_frame_pool(struct megasas_instance *instance)
{
	int		i = 0;
	int		cookie_cnt;
	uint16_t	max_cmd;
	uint16_t	sge_sz;
	uint32_t	sgl_sz;
	uint32_t	tot_frame_size;

	struct megasas_cmd	*cmd;

	max_cmd = instance->max_fw_cmds;

	/* each SGL slot is sized for a 64-bit scatter/gather entry */
	sge_sz = sizeof (struct megasas_sge64);

	/* calculated the number of 64byte frames required for SGL */
	sgl_sz = sge_sz * instance->max_num_sge;
	/* layout: MFI frame + SGL, with the sense buffer at the tail */
	tot_frame_size = sgl_sz + MEGAMFI_FRAME_SIZE + SENSE_LENGTH;

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
	    "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));

	while (i < max_cmd) {
		cmd = instance->cmd_list[i];

		/* one single-cookie, 64-byte aligned, 32-bit DMA object */
		cmd->frame_dma_obj.size = tot_frame_size;
		cmd->frame_dma_obj.dma_attr = megasas_generic_dma_attr;
		cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
		cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;


		cookie_cnt = mega_alloc_dma_obj(instance, &cmd->frame_dma_obj);

		/* the frame must be physically contiguous (one cookie) */
		if (cookie_cnt == -1 || cookie_cnt > 1) {
			con_log(CL_ANN, (CE_WARN,
			    "create_mfi_frame_pool: could not alloc."));
			return (DDI_FAILURE);
		}

		bzero(cmd->frame_dma_obj.buffer, tot_frame_size);

		cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
		cmd->frame = (union megasas_frame *)cmd->frame_dma_obj.buffer;
		cmd->frame_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address;

		/* sense buffer occupies the last SENSE_LENGTH bytes */
		cmd->sense = (uint8_t *)(((unsigned long)
		    cmd->frame_dma_obj.buffer) +
		    tot_frame_size - SENSE_LENGTH);
		cmd->sense_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address +
		    tot_frame_size - SENSE_LENGTH;

		/*
		 * NOTE(review): after a successful allocation the buffer
		 * should be non-NULL, so this check looks unreachable;
		 * also -ENOMEM is inconsistent with the DDI_FAILURE
		 * returned above -- callers should only compare the
		 * result against DDI_SUCCESS.  Confirm before changing.
		 */
		if (!cmd->frame || !cmd->sense) {
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: pci_pool_alloc failed \n"));

			return (-ENOMEM);
		}

		/* context lets the ISR map a firmware reply back to cmd */
		cmd->frame->io.context = cmd->index;
		i++;

		con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
		    cmd->frame->io.context, cmd->frame_phys_addr));
	}

	return (DDI_SUCCESS);
}
1904 1904
1905 1905 /*
1906 1906 * free_additional_dma_buffer
1907 1907 */
1908 1908 static void
1909 1909 free_additional_dma_buffer(struct megasas_instance *instance)
1910 1910 {
1911 1911 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
1912 1912 (void) mega_free_dma_obj(instance,
1913 1913 instance->mfi_internal_dma_obj);
1914 1914 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
1915 1915 }
1916 1916
1917 1917 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
1918 1918 (void) mega_free_dma_obj(instance,
1919 1919 instance->mfi_evt_detail_obj);
1920 1920 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
1921 1921 }
1922 1922 }
1923 1923
1924 1924 /*
1925 1925 * alloc_additional_dma_buffer
1926 1926 */
1927 1927 static int
1928 1928 alloc_additional_dma_buffer(struct megasas_instance *instance)
1929 1929 {
1930 1930 uint32_t reply_q_sz;
1931 1931 uint32_t internal_buf_size = PAGESIZE*2;
1932 1932
1933 1933 /* max cmds plus 1 + producer & consumer */
1934 1934 reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);
1935 1935
1936 1936 instance->mfi_internal_dma_obj.size = internal_buf_size;
1937 1937 instance->mfi_internal_dma_obj.dma_attr = megasas_generic_dma_attr;
1938 1938 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1939 1939 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
1940 1940 0xFFFFFFFFU;
1941 1941 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
1942 1942
1943 1943 if (mega_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj)
1944 1944 != 1) {
1945 1945 con_log(CL_ANN, (CE_WARN, "megaraid: could not alloc reply Q"));
1946 1946 return (DDI_FAILURE);
1947 1947 }
1948 1948
1949 1949 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
1950 1950
1951 1951 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
1952 1952
1953 1953 instance->producer = (uint32_t *)((unsigned long)
1954 1954 instance->mfi_internal_dma_obj.buffer);
1955 1955 instance->consumer = (uint32_t *)((unsigned long)
1956 1956 instance->mfi_internal_dma_obj.buffer + 4);
1957 1957 instance->reply_queue = (uint32_t *)((unsigned long)
1958 1958 instance->mfi_internal_dma_obj.buffer + 8);
1959 1959 instance->internal_buf = (caddr_t)(((unsigned long)
1960 1960 instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
1961 1961 instance->internal_buf_dmac_add =
1962 1962 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
1963 1963 reply_q_sz;
1964 1964 instance->internal_buf_size = internal_buf_size -
1965 1965 (reply_q_sz + 8);
1966 1966
1967 1967 /* allocate evt_detail */
1968 1968 instance->mfi_evt_detail_obj.size = sizeof (struct megasas_evt_detail);
1969 1969 instance->mfi_evt_detail_obj.dma_attr = megasas_generic_dma_attr;
1970 1970 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1971 1971 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1972 1972 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
1973 1973 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;
1974 1974
1975 1975 if (mega_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj) != 1) {
1976 1976 con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: "
1977 1977 "could not data transfer buffer alloc."));
1978 1978 return (DDI_FAILURE);
1979 1979 }
1980 1980
1981 1981 bzero(instance->mfi_evt_detail_obj.buffer,
1982 1982 sizeof (struct megasas_evt_detail));
1983 1983
1984 1984 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
1985 1985
1986 1986 return (DDI_SUCCESS);
1987 1987 }
1988 1988
1989 1989 /*
1990 1990 * free_space_for_mfi
1991 1991 */
1992 1992 static void
1993 1993 free_space_for_mfi(struct megasas_instance *instance)
1994 1994 {
1995 1995 int i;
1996 1996 uint32_t max_cmd = instance->max_fw_cmds;
1997 1997
1998 1998 /* already freed */
1999 1999 if (instance->cmd_list == NULL) {
2000 2000 return;
2001 2001 }
2002 2002
2003 2003 free_additional_dma_buffer(instance);
2004 2004
2005 2005 /* first free the MFI frame pool */
2006 2006 destroy_mfi_frame_pool(instance);
2007 2007
2008 2008 /* free all the commands in the cmd_list */
2009 2009 for (i = 0; i < instance->max_fw_cmds; i++) {
2010 2010 kmem_free(instance->cmd_list[i],
2011 2011 sizeof (struct megasas_cmd));
2012 2012
2013 2013 instance->cmd_list[i] = NULL;
2014 2014 }
2015 2015
2016 2016 /* free the cmd_list buffer itself */
2017 2017 kmem_free(instance->cmd_list,
2018 2018 sizeof (struct megasas_cmd *) * max_cmd);
2019 2019
2020 2020 instance->cmd_list = NULL;
2021 2021
2022 2022 INIT_LIST_HEAD(&instance->cmd_pool_list);
2023 2023 }
2024 2024
2025 2025 /*
2026 2026 * alloc_space_for_mfi
2027 2027 */
2028 2028 static int
2029 2029 alloc_space_for_mfi(struct megasas_instance *instance)
2030 2030 {
2031 2031 int i;
2032 2032 uint32_t max_cmd;
2033 2033 size_t sz;
2034 2034
2035 2035 struct megasas_cmd *cmd;
2036 2036
2037 2037 max_cmd = instance->max_fw_cmds;
2038 2038 sz = sizeof (struct megasas_cmd *) * max_cmd;
2039 2039
2040 2040 /*
2041 2041 * instance->cmd_list is an array of struct megasas_cmd pointers.
2042 2042 * Allocate the dynamic array first and then allocate individual
2043 2043 * commands.
2044 2044 */
2045 2045 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
2046 2046 ASSERT(instance->cmd_list);
2047 2047
2048 2048 for (i = 0; i < max_cmd; i++) {
2049 2049 instance->cmd_list[i] = kmem_zalloc(sizeof (struct megasas_cmd),
2050 2050 KM_SLEEP);
2051 2051 ASSERT(instance->cmd_list[i]);
2052 2052 }
2053 2053
2054 2054 INIT_LIST_HEAD(&instance->cmd_pool_list);
2055 2055
2056 2056 /* add all the commands to command pool (instance->cmd_pool) */
2057 2057 for (i = 0; i < max_cmd; i++) {
2058 2058 cmd = instance->cmd_list[i];
2059 2059 cmd->index = i;
2060 2060
2061 2061 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2062 2062 }
2063 2063
2064 2064 /* create a frame pool and assign one frame to each cmd */
2065 2065 if (create_mfi_frame_pool(instance)) {
2066 2066 con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool\n"));
2067 2067 return (DDI_FAILURE);
2068 2068 }
2069 2069
2070 2070 /* create a frame pool and assign one frame to each cmd */
2071 2071 if (alloc_additional_dma_buffer(instance)) {
2072 2072 con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool\n"));
2073 2073 return (DDI_FAILURE);
2074 2074 }
2075 2075
2076 2076 return (DDI_SUCCESS);
2077 2077 }
2078 2078
2079 2079 /*
2080 2080 * get_ctrl_info
2081 2081 */
2082 2082 static int
2083 2083 get_ctrl_info(struct megasas_instance *instance,
2084 2084 struct megasas_ctrl_info *ctrl_info)
2085 2085 {
2086 2086 int ret = 0;
2087 2087
2088 2088 struct megasas_cmd *cmd;
2089 2089 struct megasas_dcmd_frame *dcmd;
2090 2090 struct megasas_ctrl_info *ci;
2091 2091
2092 2092 cmd = get_mfi_pkt(instance);
2093 2093
2094 2094 if (!cmd) {
2095 2095 con_log(CL_ANN, (CE_WARN,
2096 2096 "Failed to get a cmd for ctrl info\n"));
2097 2097 return (DDI_FAILURE);
2098 2098 }
2099 2099
2100 2100 dcmd = &cmd->frame->dcmd;
2101 2101
2102 2102 ci = (struct megasas_ctrl_info *)instance->internal_buf;
2103 2103
2104 2104 if (!ci) {
2105 2105 con_log(CL_ANN, (CE_WARN,
2106 2106 "Failed to alloc mem for ctrl info\n"));
2107 2107 return_mfi_pkt(instance, cmd);
2108 2108 return (DDI_FAILURE);
2109 2109 }
2110 2110
2111 2111 (void) memset(ci, 0, sizeof (struct megasas_ctrl_info));
2112 2112
2113 2113 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
2114 2114 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2115 2115
2116 2116 dcmd->cmd = MFI_CMD_OP_DCMD;
2117 2117 dcmd->cmd_status = MFI_CMD_STATUS_POLL_MODE;
2118 2118 dcmd->sge_count = 1;
2119 2119 dcmd->flags = MFI_FRAME_DIR_READ;
2120 2120 dcmd->timeout = 0;
2121 2121 dcmd->data_xfer_len = sizeof (struct megasas_ctrl_info);
2122 2122 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
2123 2123 dcmd->sgl.sge32[0].phys_addr = instance->internal_buf_dmac_add;
2124 2124 dcmd->sgl.sge32[0].length = sizeof (struct megasas_ctrl_info);
2125 2125
2126 2126 cmd->frame_count = 1;
2127 2127
2128 2128 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2129 2129 ret = 0;
2130 2130 (void) memcpy(ctrl_info, ci, sizeof (struct megasas_ctrl_info));
2131 2131 } else {
2132 2132 con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed\n"));
2133 2133 ret = -1;
2134 2134 }
2135 2135
2136 2136 return_mfi_pkt(instance, cmd);
2137 2137 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2138 2138 ret = -1;
2139 2139 }
2140 2140
2141 2141 return (ret);
2142 2142 }
2143 2143
2144 2144 /*
2145 2145 * abort_aen_cmd
2146 2146 */
2147 2147 static int
2148 2148 abort_aen_cmd(struct megasas_instance *instance,
2149 2149 struct megasas_cmd *cmd_to_abort)
2150 2150 {
2151 2151 int ret = 0;
2152 2152
2153 2153 struct megasas_cmd *cmd;
2154 2154 struct megasas_abort_frame *abort_fr;
2155 2155
2156 2156 cmd = get_mfi_pkt(instance);
2157 2157
2158 2158 if (!cmd) {
2159 2159 con_log(CL_ANN, (CE_WARN,
2160 2160 "Failed to get a cmd for ctrl info\n"));
2161 2161 return (DDI_FAILURE);
2162 2162 }
2163 2163
2164 2164 abort_fr = &cmd->frame->abort;
2165 2165
2166 2166 /* prepare and issue the abort frame */
2167 2167 abort_fr->cmd = MFI_CMD_OP_ABORT;
2168 2168 abort_fr->cmd_status = MFI_CMD_STATUS_SYNC_MODE;
2169 2169 abort_fr->flags = 0;
2170 2170 abort_fr->abort_context = cmd_to_abort->index;
2171 2171 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
2172 2172 abort_fr->abort_mfi_phys_addr_hi = 0;
2173 2173
2174 2174 instance->aen_cmd->abort_aen = 1;
2175 2175
2176 2176 cmd->sync_cmd = MEGASAS_TRUE;
2177 2177 cmd->frame_count = 1;
2178 2178
2179 2179 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
2180 2180 con_log(CL_ANN, (CE_WARN,
2181 2181 "abort_aen_cmd: issue_cmd_in_sync_mode failed\n"));
2182 2182 ret = -1;
2183 2183 } else {
2184 2184 ret = 0;
2185 2185 }
2186 2186
2187 2187 instance->aen_cmd->abort_aen = 1;
2188 2188 instance->aen_cmd = 0;
2189 2189
2190 2190 return_mfi_pkt(instance, cmd);
2191 2191 (void) megasas_common_check(instance, cmd);
2192 2192
2193 2193 return (ret);
2194 2194 }
2195 2195
2196 2196 /*
2197 2197 * init_mfi
2198 2198 */
2199 2199 static int
2200 2200 init_mfi(struct megasas_instance *instance)
2201 2201 {
2202 2202 off_t reglength;
2203 2203 struct megasas_cmd *cmd;
2204 2204 struct megasas_ctrl_info ctrl_info;
2205 2205 struct megasas_init_frame *init_frame;
2206 2206 struct megasas_init_queue_info *initq_info;
2207 2207
2208 2208 if ((ddi_dev_regsize(instance->dip, REGISTER_SET_IO, ®length)
2209 2209 != DDI_SUCCESS) || reglength < MINIMUM_MFI_MEM_SZ) {
2210 2210 return (DDI_FAILURE);
2211 2211 }
2212 2212
2213 2213 if (reglength > DEFAULT_MFI_MEM_SZ) {
2214 2214 reglength = DEFAULT_MFI_MEM_SZ;
2215 2215 con_log(CL_DLEVEL1, (CE_NOTE,
2216 2216 "mega: register length to map is 0x%lx bytes", reglength));
2217 2217 }
2218 2218
2219 2219 if (ddi_regs_map_setup(instance->dip, REGISTER_SET_IO,
2220 2220 &instance->regmap, 0, reglength, &endian_attr,
2221 2221 &instance->regmap_handle) != DDI_SUCCESS) {
2222 2222 con_log(CL_ANN, (CE_NOTE,
2223 2223 "megaraid: couldn't map control registers"));
2224 2224
2225 2225 goto fail_mfi_reg_setup;
2226 2226 }
2227 2227
2228 2228 /* we expect the FW state to be READY */
2229 2229 if (mfi_state_transition_to_ready(instance)) {
2230 2230 con_log(CL_ANN, (CE_WARN, "megaraid: F/W is not ready"));
2231 2231 goto fail_ready_state;
2232 2232 }
2233 2233
2234 2234 /* get various operational parameters from status register */
2235 2235 instance->max_num_sge =
2236 2236 (instance->func_ptr->read_fw_status_reg(instance) &
2237 2237 0xFF0000) >> 0x10;
2238 2238 /*
2239 2239 * Reduce the max supported cmds by 1. This is to ensure that the
2240 2240 * reply_q_sz (1 more than the max cmd that driver may send)
2241 2241 * does not exceed max cmds that the FW can support
2242 2242 */
2243 2243 instance->max_fw_cmds =
2244 2244 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
2245 2245 instance->max_fw_cmds = instance->max_fw_cmds - 1;
2246 2246
2247 2247 instance->max_num_sge =
2248 2248 (instance->max_num_sge > MEGASAS_MAX_SGE_CNT) ?
2249 2249 MEGASAS_MAX_SGE_CNT : instance->max_num_sge;
2250 2250
2251 2251 /* create a pool of commands */
2252 2252 if (alloc_space_for_mfi(instance))
2253 2253 goto fail_alloc_fw_space;
2254 2254
2255 2255 /* disable interrupt for initial preparation */
2256 2256 instance->func_ptr->disable_intr(instance);
2257 2257
2258 2258 /*
2259 2259 * Prepare a init frame. Note the init frame points to queue info
2260 2260 * structure. Each frame has SGL allocated after first 64 bytes. For
2261 2261 * this frame - since we don't need any SGL - we use SGL's space as
2262 2262 * queue info structure
2263 2263 */
2264 2264 cmd = get_mfi_pkt(instance);
2265 2265
2266 2266 init_frame = (struct megasas_init_frame *)cmd->frame;
2267 2267 initq_info = (struct megasas_init_queue_info *)
2268 2268 ((unsigned long)init_frame + 64);
2269 2269
2270 2270 (void) memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
2271 2271 (void) memset(initq_info, 0, sizeof (struct megasas_init_queue_info));
2272 2272
2273 2273 initq_info->init_flags = 0;
2274 2274
2275 2275 initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
2276 2276
2277 2277 initq_info->producer_index_phys_addr_hi = 0;
2278 2278 initq_info->producer_index_phys_addr_lo =
2279 2279 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
2280 2280
2281 2281 initq_info->consumer_index_phys_addr_hi = 0;
2282 2282 initq_info->consumer_index_phys_addr_lo =
2283 2283 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4;
2284 2284
2285 2285 initq_info->reply_queue_start_phys_addr_hi = 0;
2286 2286 initq_info->reply_queue_start_phys_addr_lo =
2287 2287 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8;
2288 2288
2289 2289 init_frame->cmd = MFI_CMD_OP_INIT;
2290 2290 init_frame->cmd_status = MFI_CMD_STATUS_POLL_MODE;
2291 2291 init_frame->flags = 0;
2292 2292 init_frame->queue_info_new_phys_addr_lo =
2293 2293 cmd->frame_phys_addr + 64;
2294 2294 init_frame->queue_info_new_phys_addr_hi = 0;
2295 2295
2296 2296 init_frame->data_xfer_len = sizeof (struct megasas_init_queue_info);
2297 2297
2298 2298 cmd->frame_count = 1;
2299 2299
2300 2300 /* issue the init frame in polled mode */
2301 2301 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2302 2302 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
2303 2303 goto fail_fw_init;
2304 2304 }
2305 2305
2306 2306 return_mfi_pkt(instance, cmd);
2307 2307 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2308 2308 goto fail_fw_init;
2309 2309 }
2310 2310
2311 2311 /* gather misc FW related information */
2312 2312 if (!get_ctrl_info(instance, &ctrl_info)) {
2313 2313 instance->max_sectors_per_req = ctrl_info.max_request_size;
2314 2314 con_log(CL_ANN1, (CE_NOTE, "product name %s ld present %d",
2315 2315 ctrl_info.product_name, ctrl_info.ld_present_count));
2316 2316 } else {
2317 2317 instance->max_sectors_per_req = instance->max_num_sge *
2318 2318 PAGESIZE / 512;
2319 2319 }
2320 2320
2321 2321 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2322 2322 goto fail_fw_init;
2323 2323 }
2324 2324
2325 2325 return (0);
2326 2326
2327 2327 fail_fw_init:
2328 2328 fail_alloc_fw_space:
2329 2329
2330 2330 free_space_for_mfi(instance);
2331 2331
2332 2332 fail_ready_state:
2333 2333 ddi_regs_map_free(&instance->regmap_handle);
2334 2334
2335 2335 fail_mfi_reg_setup:
2336 2336 return (DDI_FAILURE);
2337 2337 }
2338 2338
/*
 * mfi_state_transition_to_ready : Move the FW to READY state
 *
 * @reg_set : MFI register set
 *
 * Polls the firmware status register; for each transitional state it
 * writes the appropriate doorbell command (where one is required) and
 * waits up to a per-state timeout for the state to change.  Returns 0
 * once the firmware reports READY, or -ENODEV on fault, unknown state,
 * or timeout.
 */
static int
mfi_state_transition_to_ready(struct megasas_instance *instance)
{
	int		i;
	uint8_t		max_wait;	/* seconds to wait in current state */
	uint32_t	fw_ctrl;
	uint32_t	fw_state;
	uint32_t	cur_state;

	fw_state =
	    instance->func_ptr->read_fw_status_reg(instance) & MFI_STATE_MASK;
	con_log(CL_ANN1, (CE_NOTE,
	    "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));

	while (fw_state != MFI_STATE_READY) {
		con_log(CL_ANN, (CE_NOTE,
		    "mfi_state_transition_to_ready:FW state%x", fw_state));

		switch (fw_state) {
		case MFI_STATE_FAULT:
			/* unrecoverable: the firmware has faulted */
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: FW in FAULT state!!"));

			return (-ENODEV);
		case MFI_STATE_WAIT_HANDSHAKE:
			/* set the CLR bit in IMR0 */
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: FW waiting for HANDSHAKE"));
			/*
			 * PCI_Hot Plug: MFI F/W requires
			 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
			 * to be set
			 */
			/* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
			WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
			    MFI_INIT_HOTPLUG, instance);

			max_wait = 2;
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			/* set the CLR bit in IMR0 */
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: FW state boot message pending"));
			/*
			 * PCI_Hot Plug: MFI F/W requires
			 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
			 * to be set
			 */
			WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);

			max_wait = 10;
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/* bring it to READY state; assuming max wait 2 secs */
			instance->func_ptr->disable_intr(instance);
			con_log(CL_ANN1, (CE_NOTE,
			    "megasas: FW in OPERATIONAL state"));
			/*
			 * PCI_Hot Plug: MFI F/W requires
			 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
			 * to be set
			 */
			/* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
			WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);

			max_wait = 10;
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/* this state should not last for more than 2 seconds */
			con_log(CL_ANN, (CE_NOTE, "FW state undefined\n"));

			max_wait = 2;
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			/* BIOS/boot-block init: just wait it out */
			max_wait = 2;
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			/* firmware internal init: just wait it out */
			max_wait = 2;
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_DEVICE_SCAN:
			/* device scan can take longer; allow 10 seconds */
			max_wait = 10;
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		default:
			con_log(CL_ANN, (CE_NOTE,
			    "megasas: Unknown state 0x%x\n", fw_state));
			return (-ENODEV);
		}

		/* the cur_state should not last for more than max_wait secs */
		for (i = 0; i < (max_wait * MILLISEC); i++) {
			/* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
			fw_state =
			    instance->func_ptr->read_fw_status_reg(instance) &
			    MFI_STATE_MASK;

			if (fw_state == cur_state) {
				/* still in the same state; poll again in 1ms */
				delay(1 * drv_usectohz(MILLISEC));
			} else {
				break;
			}
		}

		/* return error if fw_state hasn't changed after max_wait */
		if (fw_state == cur_state) {
			con_log(CL_ANN, (CE_NOTE,
			    "FW state hasn't changed in %d secs\n", max_wait));
			return (-ENODEV);
		}
	};

	fw_ctrl = RD_IB_DOORBELL(instance);

	con_log(CL_ANN1, (CE_NOTE,
	    "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));

	/*
	 * Write 0xF to the doorbell register to do the following.
	 * - Abort all outstanding commands (bit 0).
	 * - Transition from OPERATIONAL to READY state (bit 1).
	 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
	 * - Set to release FW to continue running (i.e. BIOS handshake
	 * (bit 3).
	 */
	WR_IB_DOORBELL(0xF, instance);

	if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
		return (-ENODEV);
	}
	return (0);
}
2481 2481
2482 2482 /*
2483 2483 * get_seq_num
2484 2484 */
2485 2485 static int
2486 2486 get_seq_num(struct megasas_instance *instance,
2487 2487 struct megasas_evt_log_info *eli)
2488 2488 {
2489 2489 int ret = 0;
2490 2490
2491 2491 dma_obj_t dcmd_dma_obj;
2492 2492 struct megasas_cmd *cmd;
2493 2493 struct megasas_dcmd_frame *dcmd;
2494 2494
2495 2495 cmd = get_mfi_pkt(instance);
2496 2496
2497 2497 if (!cmd) {
2498 2498 cmn_err(CE_WARN, "megasas: failed to get a cmd\n");
2499 2499 return (-ENOMEM);
2500 2500 }
2501 2501
2502 2502 dcmd = &cmd->frame->dcmd;
2503 2503
2504 2504 /* allocate the data transfer buffer */
2505 2505 dcmd_dma_obj.size = sizeof (struct megasas_evt_log_info);
2506 2506 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr;
2507 2507 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2508 2508 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2509 2509 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
2510 2510 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
2511 2511
2512 2512 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) {
2513 2513 con_log(CL_ANN, (CE_WARN,
2514 2514 "get_seq_num: could not data transfer buffer alloc."));
2515 2515 return (DDI_FAILURE);
2516 2516 }
2517 2517
2518 2518 (void) memset(dcmd_dma_obj.buffer, 0,
2519 2519 sizeof (struct megasas_evt_log_info));
2520 2520
2521 2521 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2522 2522
2523 2523 dcmd->cmd = MFI_CMD_OP_DCMD;
2524 2524 dcmd->cmd_status = 0;
2525 2525 dcmd->sge_count = 1;
2526 2526 dcmd->flags = MFI_FRAME_DIR_READ;
2527 2527 dcmd->timeout = 0;
2528 2528 dcmd->data_xfer_len = sizeof (struct megasas_evt_log_info);
2529 2529 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
2530 2530 dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_log_info);
2531 2531 dcmd->sgl.sge32[0].phys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
2532 2532
2533 2533 cmd->sync_cmd = MEGASAS_TRUE;
2534 2534 cmd->frame_count = 1;
2535 2535
2536 2536 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
2537 2537 cmn_err(CE_WARN, "get_seq_num: "
2538 2538 "failed to issue MR_DCMD_CTRL_EVENT_GET_INFO\n");
2539 2539 ret = -1;
2540 2540 } else {
2541 2541 /* copy the data back into callers buffer */
2542 2542 bcopy(dcmd_dma_obj.buffer, eli,
2543 2543 sizeof (struct megasas_evt_log_info));
2544 2544 ret = 0;
2545 2545 }
2546 2546
2547 2547 if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
2548 2548 ret = -1;
2549 2549
2550 2550 return_mfi_pkt(instance, cmd);
2551 2551 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2552 2552 ret = -1;
2553 2553 }
2554 2554 return (ret);
2555 2555 }
2556 2556
2557 2557 /*
2558 2558 * start_mfi_aen
2559 2559 */
2560 2560 static int
2561 2561 start_mfi_aen(struct megasas_instance *instance)
2562 2562 {
2563 2563 int ret = 0;
2564 2564
2565 2565 struct megasas_evt_log_info eli;
2566 2566 union megasas_evt_class_locale class_locale;
2567 2567
2568 2568 /* get the latest sequence number from FW */
2569 2569 (void) memset(&eli, 0, sizeof (struct megasas_evt_log_info));
2570 2570
2571 2571 if (get_seq_num(instance, &eli)) {
2572 2572 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num\n");
2573 2573 return (-1);
2574 2574 }
2575 2575
2576 2576 /* register AEN with FW for latest sequence number plus 1 */
2577 2577 class_locale.members.reserved = 0;
2578 2578 class_locale.members.locale = MR_EVT_LOCALE_ALL;
2579 2579 class_locale.members.class = MR_EVT_CLASS_CRITICAL;
2580 2580
2581 2581 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
2582 2582 class_locale.word);
2583 2583
2584 2584 if (ret) {
2585 2585 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed\n");
2586 2586 return (-1);
2587 2587 }
2588 2588
2589 2589 return (ret);
2590 2590 }
2591 2591
2592 2592 /*
2593 2593 * flush_cache
2594 2594 */
2595 2595 static void
2596 2596 flush_cache(struct megasas_instance *instance)
2597 2597 {
2598 2598 struct megasas_cmd *cmd;
2599 2599 struct megasas_dcmd_frame *dcmd;
2600 2600
2601 2601 if (!(cmd = get_mfi_pkt(instance)))
2602 2602 return;
2603 2603
2604 2604 dcmd = &cmd->frame->dcmd;
2605 2605
2606 2606 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2607 2607
2608 2608 dcmd->cmd = MFI_CMD_OP_DCMD;
2609 2609 dcmd->cmd_status = 0x0;
2610 2610 dcmd->sge_count = 0;
2611 2611 dcmd->flags = MFI_FRAME_DIR_NONE;
2612 2612 dcmd->timeout = 0;
2613 2613 dcmd->data_xfer_len = 0;
2614 2614 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
2615 2615 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
2616 2616
2617 2617 cmd->frame_count = 1;
2618 2618
2619 2619 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2620 2620 cmn_err(CE_WARN,
2621 2621 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH\n");
2622 2622 }
2623 2623 con_log(CL_DLEVEL1, (CE_NOTE, "done"));
2624 2624 return_mfi_pkt(instance, cmd);
2625 2625 (void) megasas_common_check(instance, cmd);
2626 2626 }
2627 2627
2628 2628 /*
2629 2629 * service_mfi_aen- Completes an AEN command
2630 2630 * @instance: Adapter soft state
2631 2631 * @cmd: Command to be completed
2632 2632 *
2633 2633 */
2634 2634 static void
2635 2635 service_mfi_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2636 2636 {
2637 2637 uint32_t seq_num;
2638 2638 struct megasas_evt_detail *evt_detail =
2639 2639 (struct megasas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
2640 2640
2641 2641 cmd->cmd_status = cmd->frame->io.cmd_status;
2642 2642
2643 2643 if (cmd->cmd_status == ENODATA) {
2644 2644 cmd->cmd_status = 0;
2645 2645 }
2646 2646
2647 2647 /*
2648 2648 * log the MFI AEN event to the sysevent queue so that
2649 2649 * application will get noticed
2650 2650 */
2651 2651 if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
2652 2652 NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
2653 2653 int instance_no = ddi_get_instance(instance->dip);
2654 2654 con_log(CL_ANN, (CE_WARN,
2655 2655 "mega%d: Failed to log AEN event", instance_no));
2656 2656 }
2657 2657
2658 2658 /* get copy of seq_num and class/locale for re-registration */
2659 2659 seq_num = evt_detail->seq_num;
2660 2660 seq_num++;
2661 2661 (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
2662 2662 sizeof (struct megasas_evt_detail));
2663 2663
2664 2664 cmd->frame->dcmd.cmd_status = 0x0;
2665 2665 cmd->frame->dcmd.mbox.w[0] = seq_num;
2666 2666
2667 2667 instance->aen_seq_num = seq_num;
2668 2668
2669 2669 cmd->frame_count = 1;
2670 2670
2671 2671 /* Issue the aen registration frame */
2672 2672 instance->func_ptr->issue_cmd(cmd, instance);
2673 2673 }
2674 2674
2675 2675 /*
2676 2676 * complete_cmd_in_sync_mode - Completes an internal command
2677 2677 * @instance: Adapter soft state
2678 2678 * @cmd: Command to be completed
2679 2679 *
2680 2680 * The issue_cmd_in_sync_mode() function waits for a command to complete
2681 2681 * after it issues a command. This function wakes up that waiting routine by
2682 2682 * calling wake_up() on the wait queue.
2683 2683 */
2684 2684 static void
2685 2685 complete_cmd_in_sync_mode(struct megasas_instance *instance,
2686 2686 struct megasas_cmd *cmd)
2687 2687 {
2688 2688 cmd->cmd_status = cmd->frame->io.cmd_status;
2689 2689
2690 2690 cmd->sync_cmd = MEGASAS_FALSE;
2691 2691
2692 2692 if (cmd->cmd_status == ENODATA) {
2693 2693 cmd->cmd_status = 0;
2694 2694 }
2695 2695
2696 2696 cv_broadcast(&instance->int_cmd_cv);
2697 2697 }
2698 2698
2699 2699 /*
2700 2700 * megasas_softintr - The Software ISR
2701 2701 * @param arg : HBA soft state
2702 2702 *
2703 2703 * called from high-level interrupt if hi-level interrupt are not there,
2704 2704 * otherwise triggered as a soft interrupt
2705 2705 */
2706 2706 static uint_t
2707 2707 megasas_softintr(struct megasas_instance *instance)
2708 2708 {
2709 2709 struct scsi_pkt *pkt;
2710 2710 struct scsa_cmd *acmd;
2711 2711 struct megasas_cmd *cmd;
2712 2712 struct mlist_head *pos, *next;
2713 2713 mlist_t process_list;
2714 2714 struct megasas_header *hdr;
2715 2715 struct scsi_arq_status *arqstat;
2716 2716
2717 2717 con_log(CL_ANN1, (CE_CONT, "megasas_softintr called"));
2718 2718
2719 2719 ASSERT(instance);
2720 2720 mutex_enter(&instance->completed_pool_mtx);
2721 2721
2722 2722 if (mlist_empty(&instance->completed_pool_list)) {
2723 2723 mutex_exit(&instance->completed_pool_mtx);
2724 2724 return (DDI_INTR_UNCLAIMED);
2725 2725 }
2726 2726
2727 2727 instance->softint_running = 1;
2728 2728
2729 2729 INIT_LIST_HEAD(&process_list);
2730 2730 mlist_splice(&instance->completed_pool_list, &process_list);
2731 2731 INIT_LIST_HEAD(&instance->completed_pool_list);
2732 2732
2733 2733 mutex_exit(&instance->completed_pool_mtx);
2734 2734
2735 2735 /* perform all callbacks first, before releasing the SCBs */
2736 2736 mlist_for_each_safe(pos, next, &process_list) {
2737 2737 cmd = mlist_entry(pos, struct megasas_cmd, list);
2738 2738
2739 2739 /* syncronize the Cmd frame for the controller */
2740 2740 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
2741 2741 0, 0, DDI_DMA_SYNC_FORCPU);
2742 2742
2743 2743 if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
2744 2744 DDI_SUCCESS) {
2745 2745 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2746 2746 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2747 2747 return (DDI_INTR_UNCLAIMED);
2748 2748 }
2749 2749
2750 2750 hdr = &cmd->frame->hdr;
2751 2751
2752 2752 /* remove the internal command from the process list */
2753 2753 mlist_del_init(&cmd->list);
2754 2754
2755 2755 switch (hdr->cmd) {
2756 2756 case MFI_CMD_OP_PD_SCSI:
2757 2757 case MFI_CMD_OP_LD_SCSI:
2758 2758 case MFI_CMD_OP_LD_READ:
2759 2759 case MFI_CMD_OP_LD_WRITE:
2760 2760 /*
2761 2761 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
2762 2762 * could have been issued either through an
2763 2763 * IO path or an IOCTL path. If it was via IOCTL,
2764 2764 * we will send it to internal completion.
2765 2765 */
2766 2766 if (cmd->sync_cmd == MEGASAS_TRUE) {
2767 2767 complete_cmd_in_sync_mode(instance, cmd);
2768 2768 break;
2769 2769 }
2770 2770
2771 2771 /* regular commands */
2772 2772 acmd = cmd->cmd;
2773 2773 pkt = CMD2PKT(acmd);
2774 2774
2775 2775 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2776 2776 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2777 2777 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2778 2778 acmd->cmd_dma_offset,
2779 2779 acmd->cmd_dma_len,
2780 2780 DDI_DMA_SYNC_FORCPU);
2781 2781 }
2782 2782 }
2783 2783
2784 2784 pkt->pkt_reason = CMD_CMPLT;
2785 2785 pkt->pkt_statistics = 0;
2786 2786 pkt->pkt_state = STATE_GOT_BUS
2787 2787 | STATE_GOT_TARGET | STATE_SENT_CMD
2788 2788 | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2789 2789
2790 2790 con_log(CL_ANN1, (CE_CONT,
2791 2791 "CDB[0] = %x completed for %s: size %lx context %x",
2792 2792 pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
2793 2793 acmd->cmd_dmacount, hdr->context));
2794 2794
2795 2795 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2796 2796 struct scsi_inquiry *inq;
2797 2797
2798 2798 if (acmd->cmd_dmacount != 0) {
2799 2799 bp_mapin(acmd->cmd_buf);
2800 2800 inq = (struct scsi_inquiry *)
2801 2801 acmd->cmd_buf->b_un.b_addr;
2802 2802
2803 2803 /* don't expose physical drives to OS */
2804 2804 if (acmd->islogical &&
2805 2805 (hdr->cmd_status == MFI_STAT_OK)) {
2806 2806 display_scsi_inquiry(
2807 2807 (caddr_t)inq);
2808 2808 } else if ((hdr->cmd_status ==
2809 2809 MFI_STAT_OK) && inq->inq_dtype ==
2810 2810 DTYPE_DIRECT) {
2811 2811
2812 2812 display_scsi_inquiry(
2813 2813 (caddr_t)inq);
2814 2814
2815 2815 /* for physical disk */
2816 2816 hdr->cmd_status =
2817 2817 MFI_STAT_DEVICE_NOT_FOUND;
2818 2818 }
2819 2819 }
2820 2820 }
2821 2821
2822 2822 switch (hdr->cmd_status) {
2823 2823 case MFI_STAT_OK:
2824 2824 pkt->pkt_scbp[0] = STATUS_GOOD;
2825 2825 break;
2826 2826 case MFI_STAT_LD_CC_IN_PROGRESS:
2827 2827 case MFI_STAT_LD_RECON_IN_PROGRESS:
2828 2828 /* SJ - these are not correct way */
2829 2829 pkt->pkt_scbp[0] = STATUS_GOOD;
2830 2830 break;
2831 2831 case MFI_STAT_LD_INIT_IN_PROGRESS:
2832 2832 con_log(CL_ANN,
2833 2833 (CE_WARN, "Initialization in Progress"));
2834 2834 pkt->pkt_reason = CMD_TRAN_ERR;
2835 2835
2836 2836 break;
2837 2837 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2838 2838 con_log(CL_ANN1, (CE_CONT, "scsi_done error"));
2839 2839
2840 2840 pkt->pkt_reason = CMD_CMPLT;
2841 2841 ((struct scsi_status *)
2842 2842 pkt->pkt_scbp)->sts_chk = 1;
2843 2843
2844 2844 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2845 2845
2846 2846 con_log(CL_ANN,
2847 2847 (CE_WARN, "TEST_UNIT_READY fail"));
2848 2848
2849 2849 } else {
2850 2850 pkt->pkt_state |= STATE_ARQ_DONE;
2851 2851 arqstat = (void *)(pkt->pkt_scbp);
2852 2852 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2853 2853 arqstat->sts_rqpkt_resid = 0;
2854 2854 arqstat->sts_rqpkt_state |=
2855 2855 STATE_GOT_BUS | STATE_GOT_TARGET
2856 2856 | STATE_SENT_CMD
2857 2857 | STATE_XFERRED_DATA;
2858 2858 *(uint8_t *)&arqstat->sts_rqpkt_status =
2859 2859 STATUS_GOOD;
2860 2860
2861 2861 bcopy(cmd->sense,
2862 2862 &(arqstat->sts_sensedata),
2863 2863 acmd->cmd_scblen -
2864 2864 offsetof(struct scsi_arq_status,
2865 2865 sts_sensedata));
2866 2866 }
2867 2867 break;
2868 2868 case MFI_STAT_LD_OFFLINE:
2869 2869 case MFI_STAT_DEVICE_NOT_FOUND:
2870 2870 con_log(CL_ANN1, (CE_CONT,
2871 2871 "device not found error"));
2872 2872 pkt->pkt_reason = CMD_DEV_GONE;
2873 2873 pkt->pkt_statistics = STAT_DISCON;
2874 2874 break;
2875 2875 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2876 2876 pkt->pkt_state |= STATE_ARQ_DONE;
2877 2877 pkt->pkt_reason = CMD_CMPLT;
2878 2878 ((struct scsi_status *)
2879 2879 pkt->pkt_scbp)->sts_chk = 1;
2880 2880
2881 2881 arqstat = (void *)(pkt->pkt_scbp);
2882 2882 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2883 2883 arqstat->sts_rqpkt_resid = 0;
2884 2884 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2885 2885 | STATE_GOT_TARGET | STATE_SENT_CMD
2886 2886 | STATE_XFERRED_DATA;
2887 2887 *(uint8_t *)&arqstat->sts_rqpkt_status =
2888 2888 STATUS_GOOD;
2889 2889
2890 2890 arqstat->sts_sensedata.es_valid = 1;
2891 2891 arqstat->sts_sensedata.es_key =
2892 2892 KEY_ILLEGAL_REQUEST;
2893 2893 arqstat->sts_sensedata.es_class =
2894 2894 CLASS_EXTENDED_SENSE;
2895 2895
2896 2896 /*
2897 2897 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2898 2898 * ASC: 0x21h; ASCQ: 0x00h;
2899 2899 */
2900 2900 arqstat->sts_sensedata.es_add_code = 0x21;
2901 2901 arqstat->sts_sensedata.es_qual_code = 0x00;
2902 2902
2903 2903 break;
2904 2904
2905 2905 default:
2906 2906 con_log(CL_ANN, (CE_CONT, "Unknown status!"));
2907 2907 pkt->pkt_reason = CMD_TRAN_ERR;
2908 2908
2909 2909 break;
2910 2910 }
2911 2911
2912 2912 atomic_add_16(&instance->fw_outstanding, (-1));
2913 2913
2914 2914 return_mfi_pkt(instance, cmd);
2915 2915
2916 2916 (void) megasas_common_check(instance, cmd);
2917 2917
2918 2918 if (acmd->cmd_dmahandle) {
2919 2919 if (megasas_check_dma_handle(
2920 2920 acmd->cmd_dmahandle) != DDI_SUCCESS) {
2921 2921 ddi_fm_service_impact(instance->dip,
2922 2922 DDI_SERVICE_UNAFFECTED);
2923 2923 pkt->pkt_reason = CMD_TRAN_ERR;
2924 2924 pkt->pkt_statistics = 0;
2925 2925 }
2926 2926 }
2927 2927
2928 2928 /* Call the callback routine */
2929 2929 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
2930 2930 scsi_hba_pkt_comp(pkt);
2931 2931 }
2932 2932
2933 2933 break;
2934 2934 case MFI_CMD_OP_SMP:
2935 2935 case MFI_CMD_OP_STP:
2936 2936 complete_cmd_in_sync_mode(instance, cmd);
2937 2937 break;
2938 2938 case MFI_CMD_OP_DCMD:
2939 2939 /* see if got an event notification */
2940 2940 if (cmd->frame->dcmd.opcode ==
2941 2941 MR_DCMD_CTRL_EVENT_WAIT) {
2942 2942 if ((instance->aen_cmd == cmd) &&
2943 2943 (instance->aen_cmd->abort_aen)) {
2944 2944 con_log(CL_ANN, (CE_WARN,
2945 2945 "megasas_softintr: "
2946 2946 "aborted_aen returned"));
2947 2947 } else {
2948 2948 service_mfi_aen(instance, cmd);
2949 2949
2950 2950 atomic_add_16(&instance->fw_outstanding,
2951 2951 (-1));
2952 2952 }
2953 2953 } else {
2954 2954 complete_cmd_in_sync_mode(instance, cmd);
2955 2955 }
2956 2956
2957 2957 break;
2958 2958 case MFI_CMD_OP_ABORT:
2959 2959 con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete"));
2960 2960 /*
2961 2961 * MFI_CMD_OP_ABORT successfully completed
2962 2962 * in the synchronous mode
2963 2963 */
2964 2964 complete_cmd_in_sync_mode(instance, cmd);
2965 2965 break;
2966 2966 default:
2967 2967 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2968 2968 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2969 2969
2970 2970 if (cmd->pkt != NULL) {
2971 2971 pkt = cmd->pkt;
2972 2972 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
2973 2973 scsi_hba_pkt_comp(pkt);
2974 2974 }
2975 2975 }
2976 2976 con_log(CL_ANN, (CE_WARN, "Cmd type unknown !!"));
2977 2977 break;
2978 2978 }
2979 2979 }
2980 2980
2981 2981 instance->softint_running = 0;
2982 2982
2983 2983 return (DDI_INTR_CLAIMED);
2984 2984 }
2985 2985
2986 2986 /*
2987 2987 * mega_alloc_dma_obj
2988 2988 *
2989 2989 * Allocate the memory and other resources for an dma object.
2990 2990 */
2991 2991 static int
2992 2992 mega_alloc_dma_obj(struct megasas_instance *instance, dma_obj_t *obj)
2993 2993 {
2994 2994 int i;
2995 2995 size_t alen = 0;
2996 2996 uint_t cookie_cnt;
2997 2997 struct ddi_device_acc_attr tmp_endian_attr;
2998 2998
2999 2999 tmp_endian_attr = endian_attr;
3000 3000 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
3001 3001 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
3002 3002 DDI_DMA_SLEEP, NULL, &obj->dma_handle);
3003 3003 if (i != DDI_SUCCESS) {
3004 3004
3005 3005 switch (i) {
3006 3006 case DDI_DMA_BADATTR :
3007 3007 con_log(CL_ANN, (CE_WARN,
3008 3008 "Failed ddi_dma_alloc_handle- Bad atrib"));
3009 3009 break;
3010 3010 case DDI_DMA_NORESOURCES :
3011 3011 con_log(CL_ANN, (CE_WARN,
3012 3012 "Failed ddi_dma_alloc_handle- No Resources"));
3013 3013 break;
3014 3014 default :
3015 3015 con_log(CL_ANN, (CE_WARN,
3016 3016 "Failed ddi_dma_alloc_handle :unknown %d", i));
3017 3017 break;
3018 3018 }
3019 3019
3020 3020 return (-1);
3021 3021 }
3022 3022
3023 3023 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
3024 3024 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
3025 3025 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
3026 3026 alen < obj->size) {
3027 3027
3028 3028 ddi_dma_free_handle(&obj->dma_handle);
3029 3029
3030 3030 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
3031 3031
3032 3032 return (-1);
3033 3033 }
3034 3034
3035 3035 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
3036 3036 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
3037 3037 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
3038 3038
3039 3039 ddi_dma_mem_free(&obj->acc_handle);
3040 3040 ddi_dma_free_handle(&obj->dma_handle);
3041 3041
3042 3042 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
3043 3043
3044 3044 return (-1);
3045 3045 }
3046 3046
3047 3047 if (megasas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
3048 3048 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
3049 3049 return (-1);
3050 3050 }
3051 3051
3052 3052 if (megasas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
3053 3053 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
3054 3054 return (-1);
3055 3055 }
3056 3056
3057 3057 return (cookie_cnt);
3058 3058 }
3059 3059
3060 3060 /*
3061 3061 * mega_free_dma_obj(struct megasas_instance *, dma_obj_t)
3062 3062 *
3063 3063 * De-allocate the memory and other resources for an dma object, which must
3064 3064 * have been alloated by a previous call to mega_alloc_dma_obj()
3065 3065 */
3066 3066 static int
3067 3067 mega_free_dma_obj(struct megasas_instance *instance, dma_obj_t obj)
3068 3068 {
3069 3069
3070 3070 if (megasas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
3071 3071 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
3072 3072 return (DDI_FAILURE);
3073 3073 }
3074 3074
3075 3075 if (megasas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
3076 3076 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
3077 3077 return (DDI_FAILURE);
3078 3078 }
3079 3079
3080 3080 (void) ddi_dma_unbind_handle(obj.dma_handle);
3081 3081 ddi_dma_mem_free(&obj.acc_handle);
3082 3082 ddi_dma_free_handle(&obj.dma_handle);
3083 3083
3084 3084 return (DDI_SUCCESS);
3085 3085 }
3086 3086
/*
 * megasas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
 * int, int (*)())
 *
 * Allocate dma resources for a new scsi command
 *
 * Builds a DMA binding for the buf 'bp', honoring PKT_CONSISTENT and
 * PKT_DMA_PARTIAL in 'flags', and records the first window's cookies
 * (up to instance->max_num_sge) in acmd->cmd_dmacookies[].  Sets
 * pkt->pkt_resid to the portion of the buf not covered by this window.
 * Returns 0 on success; -1 on failure with bioerror() set on 'bp' and
 * CFLAG_DMAVALID cleared.
 */
static int
megasas_dma_alloc(struct megasas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int dma_flags;
	int (*cb)(caddr_t);
	int i;

	ddi_dma_attr_t tmp_dma_attr = megasas_generic_dma_attr;
	struct scsa_cmd *acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	/* derive transfer direction from the buf flags */
	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	/* NULL_FUNC means the caller cannot block; otherwise sleep */
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	/* cap the S/G list at what the firmware frame can carry */
	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
	tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (-1);

		case DDI_DMA_NORESOURCES:
			/* errno 0: retryable resource shortage */
			bioerror(bp, 0);
			return (-1);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "0x%x impossible\n", i));
			bioerror(bp, EFAULT);
			return (-1);
		}
	}

	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		/* partial mapping is only legal if the caller asked for it */
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible\n"));
			goto no_dma_cookies;
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed\n"));
			goto no_dma_cookies;
		}

		/* position at the current (first) window */
		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed\n"));
			goto no_dma_cookies;
		}

		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		/* fully mapped: a single window covering the whole buf */
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

get_dma_cookies:
		/*
		 * Walk the cookies of this window, accumulating the byte
		 * count, stopping at the S/G limit or the last cookie.
		 * cmd_dmacookies[0] was filled by bind/getwin above.
		 */
		i = 0;
		acmd->cmd_dmacount = 0;
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		/* resid = bytes of the buf not covered by this window */
		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (0);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible\n"));
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "0x%x impossible\n", i));
		break;
	}

no_dma_cookies:
	/* failure: release the handle and mark the command unmapped */
	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (-1);
}
3233 3233
/*
 * megasas_dma_move(struct megasas_instance *, struct scsi_pkt *, struct buf *)
 *
 * move dma resources to next dma window
 *
 * Advances a PKT_DMA_PARTIAL binding: if the current window's cookies
 * are exhausted, moves to the next window (or returns when there is
 * none); otherwise continues with the next cookie of this window.
 * Refills acmd->cmd_dmacookies[] (up to instance->max_num_sge) and
 * updates pkt->pkt_resid.  Returns 0 on success, -1 when already at
 * the last window or ddi_dma_getwin() fails.
 */
static int
megasas_dma_move(struct megasas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp)
{
	int i = 0;

	struct scsa_cmd *acmd = PKT2CMD(pkt);

	/*
	 * If there are no more cookies remaining in this window,
	 * must move to the next window first.
	 */
	if (acmd->cmd_cookie == acmd->cmd_ncookies) {
		/* single fully-consumed window: nothing left to move to */
		if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
			return (0);
		}

		/* at last window, cannot move */
		if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
			return (-1);
		}

		/* load the new window's first cookie into slot 0 */
		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {
			return (-1);
		}

		acmd->cmd_cookie = 0;
	} else {
		/* still more cookies in this window - get the next one */
		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[0]);
	}

	/* get remaining cookies in this window, up to our maximum */
	for (;;) {
		/* cmd_dmacount accumulates across windows (running total) */
		acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
		acmd->cmd_cookie++;

		if (i == instance->max_num_sge ||
		    acmd->cmd_cookie == acmd->cmd_ncookies) {
			break;
		}

		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[i]);
	}

	acmd->cmd_cookiecnt = i;

	/* resid = bytes of the buf still not covered after this move */
	if (bp->b_bcount >= acmd->cmd_dmacount) {
		pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
	} else {
		pkt->pkt_resid = 0;
	}

	return (0);
}
3300 3300
3301 3301 /*
3302 3302 * build_cmd
3303 3303 */
3304 3304 static struct megasas_cmd *
3305 3305 build_cmd(struct megasas_instance *instance, struct scsi_address *ap,
3306 3306 struct scsi_pkt *pkt, uchar_t *cmd_done)
3307 3307 {
3308 3308 uint16_t flags = 0;
3309 3309 uint32_t i;
3310 3310 uint32_t context;
3311 3311 uint32_t sge_bytes;
3312 3312
3313 3313 struct megasas_cmd *cmd;
3314 3314 struct megasas_sge64 *mfi_sgl;
3315 3315 struct scsa_cmd *acmd = PKT2CMD(pkt);
3316 3316 struct megasas_pthru_frame *pthru;
3317 3317 struct megasas_io_frame *ldio;
3318 3318
3319 3319 /* find out if this is logical or physical drive command. */
3320 3320 acmd->islogical = MEGADRV_IS_LOGICAL(ap);
3321 3321 acmd->device_id = MAP_DEVICE_ID(instance, ap);
3322 3322 *cmd_done = 0;
3323 3323
3324 3324 /* get the command packet */
3325 3325 if (!(cmd = get_mfi_pkt(instance))) {
3326 3326 return (NULL);
3327 3327 }
3328 3328
3329 3329 cmd->pkt = pkt;
3330 3330 cmd->cmd = acmd;
3331 3331
3332 3332 /* lets get the command directions */
3333 3333 if (acmd->cmd_flags & CFLAG_DMASEND) {
3334 3334 flags = MFI_FRAME_DIR_WRITE;
3335 3335
3336 3336 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
3337 3337 (void) ddi_dma_sync(acmd->cmd_dmahandle,
3338 3338 acmd->cmd_dma_offset, acmd->cmd_dma_len,
3339 3339 DDI_DMA_SYNC_FORDEV);
3340 3340 }
3341 3341 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
3342 3342 flags = MFI_FRAME_DIR_READ;
3343 3343
3344 3344 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
3345 3345 (void) ddi_dma_sync(acmd->cmd_dmahandle,
3346 3346 acmd->cmd_dma_offset, acmd->cmd_dma_len,
3347 3347 DDI_DMA_SYNC_FORCPU);
3348 3348 }
3349 3349 } else {
3350 3350 flags = MFI_FRAME_DIR_NONE;
3351 3351 }
3352 3352
3353 3353 flags |= MFI_FRAME_SGL64;
3354 3354
3355 3355 switch (pkt->pkt_cdbp[0]) {
3356 3356
3357 3357 /*
3358 3358 * case SCMD_SYNCHRONIZE_CACHE:
3359 3359 * flush_cache(instance);
3360 3360 * return_mfi_pkt(instance, cmd);
3361 3361 * *cmd_done = 1;
3362 3362 *
3363 3363 * return (NULL);
3364 3364 */
3365 3365
3366 3366 case SCMD_READ:
3367 3367 case SCMD_WRITE:
3368 3368 case SCMD_READ_G1:
3369 3369 case SCMD_WRITE_G1:
3370 3370 if (acmd->islogical) {
3371 3371 ldio = (struct megasas_io_frame *)cmd->frame;
3372 3372
3373 3373 /*
3374 3374 * preare the Logical IO frame:
3375 3375 * 2nd bit is zero for all read cmds
3376 3376 */
3377 3377 ldio->cmd = (pkt->pkt_cdbp[0] & 0x02) ?
3378 3378 MFI_CMD_OP_LD_WRITE : MFI_CMD_OP_LD_READ;
3379 3379 ldio->cmd_status = 0x0;
3380 3380 ldio->scsi_status = 0x0;
3381 3381 ldio->target_id = acmd->device_id;
3382 3382 ldio->timeout = 0;
3383 3383 ldio->reserved_0 = 0;
3384 3384 ldio->pad_0 = 0;
3385 3385 ldio->flags = flags;
3386 3386
3387 3387 /* Initialize sense Information */
3388 3388 bzero(cmd->sense, SENSE_LENGTH);
3389 3389 ldio->sense_len = SENSE_LENGTH;
3390 3390 ldio->sense_buf_phys_addr_hi = 0;
3391 3391 ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
3392 3392
3393 3393 ldio->start_lba_hi = 0;
3394 3394 ldio->access_byte = (acmd->cmd_cdblen != 6) ?
3395 3395 pkt->pkt_cdbp[1] : 0;
3396 3396 ldio->sge_count = acmd->cmd_cookiecnt;
3397 3397 mfi_sgl = (struct megasas_sge64 *)&ldio->sgl;
3398 3398
3399 3399 context = ldio->context;
3400 3400
3401 3401 if (acmd->cmd_cdblen == CDB_GROUP0) {
3402 3402 ldio->lba_count = host_to_le16(
3403 3403 (uint16_t)(pkt->pkt_cdbp[4]));
3404 3404
3405 3405 ldio->start_lba_lo = host_to_le32(
3406 3406 ((uint32_t)(pkt->pkt_cdbp[3])) |
3407 3407 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
3408 3408 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
3409 3409 << 16));
3410 3410 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
3411 3411 ldio->lba_count = host_to_le16(
3412 3412 ((uint16_t)(pkt->pkt_cdbp[8])) |
3413 3413 ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
3414 3414
3415 3415 ldio->start_lba_lo = host_to_le32(
3416 3416 ((uint32_t)(pkt->pkt_cdbp[5])) |
3417 3417 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3418 3418 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3419 3419 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3420 3420 } else if (acmd->cmd_cdblen == CDB_GROUP2) {
3421 3421 ldio->lba_count = host_to_le16(
3422 3422 ((uint16_t)(pkt->pkt_cdbp[9])) |
3423 3423 ((uint16_t)(pkt->pkt_cdbp[8]) << 8) |
3424 3424 ((uint16_t)(pkt->pkt_cdbp[7]) << 16) |
3425 3425 ((uint16_t)(pkt->pkt_cdbp[6]) << 24));
3426 3426
3427 3427 ldio->start_lba_lo = host_to_le32(
3428 3428 ((uint32_t)(pkt->pkt_cdbp[5])) |
3429 3429 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3430 3430 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3431 3431 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3432 3432 } else if (acmd->cmd_cdblen == CDB_GROUP3) {
3433 3433 ldio->lba_count = host_to_le16(
3434 3434 ((uint16_t)(pkt->pkt_cdbp[13])) |
3435 3435 ((uint16_t)(pkt->pkt_cdbp[12]) << 8) |
3436 3436 ((uint16_t)(pkt->pkt_cdbp[11]) << 16) |
3437 3437 ((uint16_t)(pkt->pkt_cdbp[10]) << 24));
3438 3438
3439 3439 ldio->start_lba_lo = host_to_le32(
3440 3440 ((uint32_t)(pkt->pkt_cdbp[9])) |
3441 3441 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
3442 3442 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
3443 3443 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
3444 3444
3445 3445 ldio->start_lba_lo = host_to_le32(
3446 3446 ((uint32_t)(pkt->pkt_cdbp[5])) |
3447 3447 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3448 3448 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3449 3449 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3450 3450 }
3451 3451
3452 3452 break;
3453 3453 }
3454 3454 /* fall through For all non-rd/wr cmds */
3455 3455 default:
3456 3456 pthru = (struct megasas_pthru_frame *)cmd->frame;
3457 3457
3458 3458 /* prepare the DCDB frame */
3459 3459 pthru->cmd = (acmd->islogical) ?
3460 3460 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI;
3461 3461 pthru->cmd_status = 0x0;
3462 3462 pthru->scsi_status = 0x0;
3463 3463 pthru->target_id = acmd->device_id;
3464 3464 pthru->lun = 0;
3465 3465 pthru->cdb_len = acmd->cmd_cdblen;
3466 3466 pthru->timeout = 0;
3467 3467 pthru->flags = flags;
3468 3468 pthru->data_xfer_len = acmd->cmd_dmacount;
3469 3469 pthru->sge_count = acmd->cmd_cookiecnt;
3470 3470 mfi_sgl = (struct megasas_sge64 *)&pthru->sgl;
3471 3471
3472 3472 bzero(cmd->sense, SENSE_LENGTH);
3473 3473 pthru->sense_len = SENSE_LENGTH;
3474 3474 pthru->sense_buf_phys_addr_hi = 0;
3475 3475 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
3476 3476
3477 3477 context = pthru->context;
3478 3478
3479 3479 bcopy(pkt->pkt_cdbp, pthru->cdb, acmd->cmd_cdblen);
3480 3480
3481 3481 break;
3482 3482 }
3483 3483 #ifdef lint
3484 3484 context = context;
3485 3485 #endif
3486 3486 /* bzero(mfi_sgl, sizeof (struct megasas_sge64) * MAX_SGL); */
3487 3487
3488 3488 /* prepare the scatter-gather list for the firmware */
3489 3489 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
3490 3490 mfi_sgl->phys_addr = acmd->cmd_dmacookies[i].dmac_laddress;
3491 3491 mfi_sgl->length = acmd->cmd_dmacookies[i].dmac_size;
3492 3492 }
3493 3493
3494 3494 sge_bytes = sizeof (struct megasas_sge64)*acmd->cmd_cookiecnt;
3495 3495
3496 3496 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
3497 3497 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;
3498 3498
3499 3499 if (cmd->frame_count >= 8) {
3500 3500 cmd->frame_count = 8;
3501 3501 }
3502 3502
3503 3503 return (cmd);
3504 3504 }
3505 3505
3506 3506 /*
3507 3507 * wait_for_outstanding - Wait for all outstanding cmds
3508 3508 * @instance: Adapter soft state
3509 3509 *
3510 3510 * This function waits for upto MEGASAS_RESET_WAIT_TIME seconds for FW to
3511 3511 * complete all its outstanding commands. Returns error if one or more IOs
3512 3512 * are pending after this time period.
3513 3513 */
3514 3514 static int
3515 3515 wait_for_outstanding(struct megasas_instance *instance)
3516 3516 {
3517 3517 int i;
3518 3518 uint32_t wait_time = 90;
3519 3519
3520 3520 for (i = 0; i < wait_time; i++) {
3521 3521 if (!instance->fw_outstanding) {
3522 3522 break;
3523 3523 }
3524 3524
3525 3525 drv_usecwait(MILLISEC); /* wait for 1000 usecs */;
3526 3526 }
3527 3527
3528 3528 if (instance->fw_outstanding) {
3529 3529 return (1);
3530 3530 }
3531 3531
3532 3532 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VERSION);
3533 3533
3534 3534 return (0);
3535 3535 }
3536 3536
3537 3537 /*
3538 3538 * issue_mfi_pthru
3539 3539 */
3540 3540 static int
3541 3541 issue_mfi_pthru(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3542 3542 struct megasas_cmd *cmd, int mode)
3543 3543 {
3544 3544 void *ubuf;
3545 3545 uint32_t kphys_addr = 0;
3546 3546 uint32_t xferlen = 0;
3547 3547 uint_t model;
3548 3548
3549 3549 dma_obj_t pthru_dma_obj;
3550 3550 struct megasas_pthru_frame *kpthru;
3551 3551 struct megasas_pthru_frame *pthru;
3552 3552
3553 3553 pthru = &cmd->frame->pthru;
3554 3554 kpthru = (struct megasas_pthru_frame *)&ioctl->frame[0];
3555 3555
3556 3556 model = ddi_model_convert_from(mode & FMODELS);
3557 3557 if (model == DDI_MODEL_ILP32) {
3558 3558 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32"));
3559 3559
3560 3560 xferlen = kpthru->sgl.sge32[0].length;
3561 3561
3562 3562 /* SJ! - ubuf needs to be virtual address. */
3563 3563 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
3564 3564 } else {
3565 3565 #ifdef _ILP32
3566 3566 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32"));
3567 3567 xferlen = kpthru->sgl.sge32[0].length;
3568 3568 /* SJ! - ubuf needs to be virtual address. */
3569 3569 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
3570 3570 #else
3571 3571 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64"));
3572 3572 xferlen = kpthru->sgl.sge64[0].length;
3573 3573 /* SJ! - ubuf needs to be virtual address. */
3574 3574 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
3575 3575 #endif
3576 3576 }
3577 3577
3578 3578 if (xferlen) {
3579 3579 /* means IOCTL requires DMA */
3580 3580 /* allocate the data transfer buffer */
3581 3581 pthru_dma_obj.size = xferlen;
3582 3582 pthru_dma_obj.dma_attr = megasas_generic_dma_attr;
3583 3583 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3584 3584 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3585 3585 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
3586 3586 pthru_dma_obj.dma_attr.dma_attr_align = 1;
3587 3587
3588 3588 /* allocate kernel buffer for DMA */
3589 3589 if (mega_alloc_dma_obj(instance, &pthru_dma_obj) != 1) {
3590 3590 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3591 3591 "could not data transfer buffer alloc."));
3592 3592 return (DDI_FAILURE);
3593 3593 }
3594 3594
3595 3595 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3596 3596 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
3597 3597 if (ddi_copyin(ubuf, (void *)pthru_dma_obj.buffer,
3598 3598 xferlen, mode)) {
3599 3599 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3600 3600 "copy from user space failed\n"));
3601 3601 return (1);
3602 3602 }
3603 3603 }
3604 3604
3605 3605 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
3606 3606 }
3607 3607
3608 3608 pthru->cmd = kpthru->cmd;
3609 3609 pthru->sense_len = kpthru->sense_len;
3610 3610 pthru->cmd_status = kpthru->cmd_status;
3611 3611 pthru->scsi_status = kpthru->scsi_status;
3612 3612 pthru->target_id = kpthru->target_id;
3613 3613 pthru->lun = kpthru->lun;
3614 3614 pthru->cdb_len = kpthru->cdb_len;
3615 3615 pthru->sge_count = kpthru->sge_count;
3616 3616 pthru->timeout = kpthru->timeout;
3617 3617 pthru->data_xfer_len = kpthru->data_xfer_len;
3618 3618
3619 3619 pthru->sense_buf_phys_addr_hi = 0;
3620 3620 /* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */
3621 3621 pthru->sense_buf_phys_addr_lo = 0;
3622 3622
3623 3623 bcopy((void *)kpthru->cdb, (void *)pthru->cdb, pthru->cdb_len);
3624 3624
3625 3625 pthru->flags = kpthru->flags & ~MFI_FRAME_SGL64;
3626 3626 pthru->sgl.sge32[0].length = xferlen;
3627 3627 pthru->sgl.sge32[0].phys_addr = kphys_addr;
3628 3628
3629 3629 cmd->sync_cmd = MEGASAS_TRUE;
3630 3630 cmd->frame_count = 1;
3631 3631
3632 3632 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3633 3633 con_log(CL_ANN, (CE_WARN,
3634 3634 "issue_mfi_pthru: fw_ioctl failed\n"));
3635 3635 } else {
3636 3636 if (xferlen && (kpthru->flags & MFI_FRAME_DIR_READ)) {
3637 3637
3638 3638 if (ddi_copyout(pthru_dma_obj.buffer, ubuf,
3639 3639 xferlen, mode)) {
3640 3640 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3641 3641 "copy to user space failed\n"));
3642 3642 return (1);
3643 3643 }
3644 3644 }
3645 3645 }
3646 3646
3647 3647 kpthru->cmd_status = pthru->cmd_status;
3648 3648 kpthru->scsi_status = pthru->scsi_status;
3649 3649
3650 3650 con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, "
3651 3651 "scsi_status %x\n", pthru->cmd_status, pthru->scsi_status));
3652 3652
3653 3653 if (xferlen) {
3654 3654 /* free kernel buffer */
3655 3655 if (mega_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
3656 3656 return (1);
3657 3657 }
3658 3658
3659 3659 return (0);
3660 3660 }
3661 3661
3662 3662 /*
3663 3663 * issue_mfi_dcmd
3664 3664 */
3665 3665 static int
3666 3666 issue_mfi_dcmd(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3667 3667 struct megasas_cmd *cmd, int mode)
3668 3668 {
3669 3669 void *ubuf;
3670 3670 uint32_t kphys_addr = 0;
3671 3671 uint32_t xferlen = 0;
3672 3672 uint32_t model;
3673 3673 dma_obj_t dcmd_dma_obj;
3674 3674 struct megasas_dcmd_frame *kdcmd;
3675 3675 struct megasas_dcmd_frame *dcmd;
3676 3676
3677 3677 dcmd = &cmd->frame->dcmd;
3678 3678 kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0];
3679 3679
3680 3680 model = ddi_model_convert_from(mode & FMODELS);
3681 3681 if (model == DDI_MODEL_ILP32) {
3682 3682 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
3683 3683
3684 3684 xferlen = kdcmd->sgl.sge32[0].length;
3685 3685
3686 3686 /* SJ! - ubuf needs to be virtual address. */
3687 3687 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
3688 3688 }
3689 3689 else
3690 3690 {
3691 3691 #ifdef _ILP32
3692 3692 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
3693 3693 xferlen = kdcmd->sgl.sge32[0].length;
3694 3694 /* SJ! - ubuf needs to be virtual address. */
3695 3695 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
3696 3696 #else
3697 3697 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64"));
3698 3698 xferlen = kdcmd->sgl.sge64[0].length;
3699 3699 /* SJ! - ubuf needs to be virtual address. */
3700 3700 ubuf = (void *)(ulong_t)dcmd->sgl.sge64[0].phys_addr;
3701 3701 #endif
3702 3702 }
3703 3703 if (xferlen) {
3704 3704 /* means IOCTL requires DMA */
3705 3705 /* allocate the data transfer buffer */
3706 3706 dcmd_dma_obj.size = xferlen;
3707 3707 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr;
3708 3708 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3709 3709 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3710 3710 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3711 3711 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3712 3712
3713 3713 /* allocate kernel buffer for DMA */
3714 3714 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) {
3715 3715 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3716 3716 "could not data transfer buffer alloc."));
3717 3717 return (DDI_FAILURE);
3718 3718 }
3719 3719
3720 3720 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3721 3721 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
3722 3722 if (ddi_copyin(ubuf, (void *)dcmd_dma_obj.buffer,
3723 3723 xferlen, mode)) {
3724 3724 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3725 3725 "copy from user space failed\n"));
3726 3726 return (1);
3727 3727 }
3728 3728 }
3729 3729
3730 3730 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
3731 3731 }
3732 3732
3733 3733 dcmd->cmd = kdcmd->cmd;
3734 3734 dcmd->cmd_status = kdcmd->cmd_status;
3735 3735 dcmd->sge_count = kdcmd->sge_count;
3736 3736 dcmd->timeout = kdcmd->timeout;
3737 3737 dcmd->data_xfer_len = kdcmd->data_xfer_len;
3738 3738 dcmd->opcode = kdcmd->opcode;
3739 3739
3740 3740 bcopy((void *)kdcmd->mbox.b, (void *)dcmd->mbox.b, DCMD_MBOX_SZ);
3741 3741
3742 3742 dcmd->flags = kdcmd->flags & ~MFI_FRAME_SGL64;
3743 3743 dcmd->sgl.sge32[0].length = xferlen;
3744 3744 dcmd->sgl.sge32[0].phys_addr = kphys_addr;
3745 3745
3746 3746 cmd->sync_cmd = MEGASAS_TRUE;
3747 3747 cmd->frame_count = 1;
3748 3748
3749 3749 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3750 3750 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed\n"));
3751 3751 } else {
3752 3752 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
3753 3753
3754 3754 if (ddi_copyout(dcmd_dma_obj.buffer, ubuf,
3755 3755 xferlen, mode)) {
3756 3756 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3757 3757 "copy to user space failed\n"));
3758 3758 return (1);
3759 3759 }
3760 3760 }
3761 3761 }
3762 3762
3763 3763 kdcmd->cmd_status = dcmd->cmd_status;
3764 3764
3765 3765 if (xferlen) {
3766 3766 /* free kernel buffer */
3767 3767 if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
3768 3768 return (1);
3769 3769 }
3770 3770
3771 3771 return (0);
3772 3772 }
3773 3773
3774 3774 /*
3775 3775 * issue_mfi_smp
3776 3776 */
3777 3777 static int
3778 3778 issue_mfi_smp(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3779 3779 struct megasas_cmd *cmd, int mode)
3780 3780 {
3781 3781 void *request_ubuf;
3782 3782 void *response_ubuf;
3783 3783 uint32_t request_xferlen = 0;
3784 3784 uint32_t response_xferlen = 0;
3785 3785 uint_t model;
3786 3786 dma_obj_t request_dma_obj;
3787 3787 dma_obj_t response_dma_obj;
3788 3788 struct megasas_smp_frame *ksmp;
3789 3789 struct megasas_smp_frame *smp;
3790 3790 struct megasas_sge32 *sge32;
3791 3791 #ifndef _ILP32
3792 3792 struct megasas_sge64 *sge64;
3793 3793 #endif
3794 3794
3795 3795 smp = &cmd->frame->smp;
3796 3796 ksmp = (struct megasas_smp_frame *)&ioctl->frame[0];
3797 3797
3798 3798 model = ddi_model_convert_from(mode & FMODELS);
3799 3799 if (model == DDI_MODEL_ILP32) {
3800 3800 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));
3801 3801
3802 3802 sge32 = &ksmp->sgl[0].sge32[0];
3803 3803 response_xferlen = sge32[0].length;
3804 3804 request_xferlen = sge32[1].length;
3805 3805 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
3806 3806 "response_xferlen = %x, request_xferlen = %x",
3807 3807 response_xferlen, request_xferlen));
3808 3808
3809 3809 /* SJ! - ubuf needs to be virtual address. */
3810 3810
3811 3811 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
3812 3812 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
3813 3813 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3814 3814 "response_ubuf = %p, request_ubuf = %p",
3815 3815 response_ubuf, request_ubuf));
3816 3816 } else {
3817 3817 #ifdef _ILP32
3818 3818 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));
3819 3819
3820 3820 sge32 = &ksmp->sgl[0].sge32[0];
3821 3821 response_xferlen = sge32[0].length;
3822 3822 request_xferlen = sge32[1].length;
3823 3823 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
3824 3824 "response_xferlen = %x, request_xferlen = %x",
3825 3825 response_xferlen, request_xferlen));
3826 3826
3827 3827 /* SJ! - ubuf needs to be virtual address. */
3828 3828
3829 3829 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
3830 3830 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
3831 3831 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3832 3832 "response_ubuf = %p, request_ubuf = %p",
3833 3833 response_ubuf, request_ubuf));
3834 3834 #else
3835 3835 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64"));
3836 3836
3837 3837 sge64 = &ksmp->sgl[0].sge64[0];
3838 3838 response_xferlen = sge64[0].length;
3839 3839 request_xferlen = sge64[1].length;
3840 3840
3841 3841 /* SJ! - ubuf needs to be virtual address. */
3842 3842 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
3843 3843 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
3844 3844 #endif
3845 3845 }
3846 3846 if (request_xferlen) {
3847 3847 /* means IOCTL requires DMA */
3848 3848 /* allocate the data transfer buffer */
3849 3849 request_dma_obj.size = request_xferlen;
3850 3850 request_dma_obj.dma_attr = megasas_generic_dma_attr;
3851 3851 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3852 3852 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3853 3853 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
3854 3854 request_dma_obj.dma_attr.dma_attr_align = 1;
3855 3855
3856 3856 /* allocate kernel buffer for DMA */
3857 3857 if (mega_alloc_dma_obj(instance, &request_dma_obj) != 1) {
3858 3858 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3859 3859 "could not data transfer buffer alloc."));
3860 3860 return (DDI_FAILURE);
3861 3861 }
3862 3862
3863 3863 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3864 3864 if (ddi_copyin(request_ubuf, (void *) request_dma_obj.buffer,
3865 3865 request_xferlen, mode)) {
3866 3866 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3867 3867 "copy from user space failed\n"));
3868 3868 return (1);
3869 3869 }
3870 3870 }
3871 3871
3872 3872 if (response_xferlen) {
3873 3873 /* means IOCTL requires DMA */
3874 3874 /* allocate the data transfer buffer */
3875 3875 response_dma_obj.size = response_xferlen;
3876 3876 response_dma_obj.dma_attr = megasas_generic_dma_attr;
3877 3877 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3878 3878 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3879 3879 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
3880 3880 response_dma_obj.dma_attr.dma_attr_align = 1;
3881 3881
3882 3882 /* allocate kernel buffer for DMA */
3883 3883 if (mega_alloc_dma_obj(instance, &response_dma_obj) != 1) {
3884 3884 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3885 3885 "could not data transfer buffer alloc."));
3886 3886 return (DDI_FAILURE);
3887 3887 }
3888 3888
3889 3889 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3890 3890 if (ddi_copyin(response_ubuf, (void *) response_dma_obj.buffer,
3891 3891 response_xferlen, mode)) {
3892 3892 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3893 3893 "copy from user space failed\n"));
3894 3894 return (1);
3895 3895 }
3896 3896 }
3897 3897
3898 3898 smp->cmd = ksmp->cmd;
3899 3899 smp->cmd_status = ksmp->cmd_status;
3900 3900 smp->connection_status = ksmp->connection_status;
3901 3901 smp->sge_count = ksmp->sge_count;
3902 3902 /* smp->context = ksmp->context; */
3903 3903 smp->timeout = ksmp->timeout;
3904 3904 smp->data_xfer_len = ksmp->data_xfer_len;
3905 3905
3906 3906 bcopy((void *)&ksmp->sas_addr, (void *)&smp->sas_addr,
3907 3907 sizeof (uint64_t));
3908 3908
3909 3909 smp->flags = ksmp->flags & ~MFI_FRAME_SGL64;
3910 3910
3911 3911 model = ddi_model_convert_from(mode & FMODELS);
3912 3912 if (model == DDI_MODEL_ILP32) {
3913 3913 con_log(CL_ANN1, (CE_NOTE,
3914 3914 "handle_drv_ioctl: DDI_MODEL_ILP32"));
3915 3915
3916 3916 sge32 = &smp->sgl[0].sge32[0];
3917 3917 sge32[0].length = response_xferlen;
3918 3918 sge32[0].phys_addr =
3919 3919 response_dma_obj.dma_cookie[0].dmac_address;
3920 3920 sge32[1].length = request_xferlen;
3921 3921 sge32[1].phys_addr =
3922 3922 request_dma_obj.dma_cookie[0].dmac_address;
3923 3923 } else {
3924 3924 #ifdef _ILP32
3925 3925 con_log(CL_ANN1, (CE_NOTE,
3926 3926 "handle_drv_ioctl: DDI_MODEL_ILP32"));
3927 3927 sge32 = &smp->sgl[0].sge32[0];
3928 3928 sge32[0].length = response_xferlen;
3929 3929 sge32[0].phys_addr =
3930 3930 response_dma_obj.dma_cookie[0].dmac_address;
3931 3931 sge32[1].length = request_xferlen;
3932 3932 sge32[1].phys_addr =
3933 3933 request_dma_obj.dma_cookie[0].dmac_address;
3934 3934 #else
3935 3935 con_log(CL_ANN1, (CE_NOTE,
3936 3936 "issue_mfi_smp: DDI_MODEL_LP64"));
3937 3937 sge64 = &smp->sgl[0].sge64[0];
3938 3938 sge64[0].length = response_xferlen;
3939 3939 sge64[0].phys_addr =
3940 3940 response_dma_obj.dma_cookie[0].dmac_address;
3941 3941 sge64[1].length = request_xferlen;
3942 3942 sge64[1].phys_addr =
3943 3943 request_dma_obj.dma_cookie[0].dmac_address;
3944 3944 #endif
3945 3945 }
3946 3946 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3947 3947 "smp->response_xferlen = %d, smp->request_xferlen = %d "
3948 3948 "smp->data_xfer_len = %d", sge32[0].length, sge32[1].length,
3949 3949 smp->data_xfer_len));
3950 3950
3951 3951 cmd->sync_cmd = MEGASAS_TRUE;
3952 3952 cmd->frame_count = 1;
3953 3953
3954 3954 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3955 3955 con_log(CL_ANN, (CE_WARN,
3956 3956 "issue_mfi_smp: fw_ioctl failed\n"));
3957 3957 } else {
3958 3958 con_log(CL_ANN1, (CE_NOTE,
3959 3959 "issue_mfi_smp: copy to user space\n"));
3960 3960
3961 3961 if (request_xferlen) {
3962 3962 if (ddi_copyout(request_dma_obj.buffer, request_ubuf,
3963 3963 request_xferlen, mode)) {
3964 3964 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3965 3965 "copy to user space failed\n"));
3966 3966 return (1);
3967 3967 }
3968 3968 }
3969 3969
3970 3970 if (response_xferlen) {
3971 3971 if (ddi_copyout(response_dma_obj.buffer, response_ubuf,
3972 3972 response_xferlen, mode)) {
3973 3973 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3974 3974 "copy to user space failed\n"));
3975 3975 return (1);
3976 3976 }
3977 3977 }
3978 3978 }
3979 3979
3980 3980 ksmp->cmd_status = smp->cmd_status;
3981 3981 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
3982 3982 smp->cmd_status));
3983 3983
3984 3984
3985 3985 if (request_xferlen) {
3986 3986 /* free kernel buffer */
3987 3987 if (mega_free_dma_obj(instance, request_dma_obj) != DDI_SUCCESS)
3988 3988 return (1);
3989 3989 }
3990 3990
3991 3991 if (response_xferlen) {
3992 3992 /* free kernel buffer */
3993 3993 if (mega_free_dma_obj(instance, response_dma_obj) !=
3994 3994 DDI_SUCCESS)
3995 3995 return (1);
3996 3996 }
3997 3997
3998 3998 return (0);
3999 3999 }
4000 4000
4001 4001 /*
4002 4002 * issue_mfi_stp
4003 4003 */
4004 4004 static int
4005 4005 issue_mfi_stp(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4006 4006 struct megasas_cmd *cmd, int mode)
4007 4007 {
4008 4008 void *fis_ubuf;
4009 4009 void *data_ubuf;
4010 4010 uint32_t fis_xferlen = 0;
4011 4011 uint32_t data_xferlen = 0;
4012 4012 uint_t model;
4013 4013 dma_obj_t fis_dma_obj;
4014 4014 dma_obj_t data_dma_obj;
4015 4015 struct megasas_stp_frame *kstp;
4016 4016 struct megasas_stp_frame *stp;
4017 4017
4018 4018 stp = &cmd->frame->stp;
4019 4019 kstp = (struct megasas_stp_frame *)&ioctl->frame[0];
4020 4020
4021 4021 model = ddi_model_convert_from(mode & FMODELS);
4022 4022 if (model == DDI_MODEL_ILP32) {
4023 4023 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));
4024 4024
4025 4025 fis_xferlen = kstp->sgl.sge32[0].length;
4026 4026 data_xferlen = kstp->sgl.sge32[1].length;
4027 4027
4028 4028 /* SJ! - ubuf needs to be virtual address. */
4029 4029 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
4030 4030 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
4031 4031 }
4032 4032 else
4033 4033 {
4034 4034 #ifdef _ILP32
4035 4035 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));
4036 4036
4037 4037 fis_xferlen = kstp->sgl.sge32[0].length;
4038 4038 data_xferlen = kstp->sgl.sge32[1].length;
4039 4039
4040 4040 /* SJ! - ubuf needs to be virtual address. */
4041 4041 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
4042 4042 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
4043 4043 #else
4044 4044 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64"));
4045 4045
4046 4046 fis_xferlen = kstp->sgl.sge64[0].length;
4047 4047 data_xferlen = kstp->sgl.sge64[1].length;
4048 4048
4049 4049 /* SJ! - ubuf needs to be virtual address. */
4050 4050 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
4051 4051 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
4052 4052 #endif
4053 4053 }
4054 4054
4055 4055
4056 4056 if (fis_xferlen) {
4057 4057 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: "
4058 4058 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
4059 4059
4060 4060 /* means IOCTL requires DMA */
4061 4061 /* allocate the data transfer buffer */
4062 4062 fis_dma_obj.size = fis_xferlen;
4063 4063 fis_dma_obj.dma_attr = megasas_generic_dma_attr;
4064 4064 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4065 4065 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4066 4066 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
4067 4067 fis_dma_obj.dma_attr.dma_attr_align = 1;
4068 4068
4069 4069 /* allocate kernel buffer for DMA */
4070 4070 if (mega_alloc_dma_obj(instance, &fis_dma_obj) != 1) {
4071 4071 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4072 4072 "could not data transfer buffer alloc."));
4073 4073 return (DDI_FAILURE);
4074 4074 }
4075 4075
4076 4076 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
4077 4077 if (ddi_copyin(fis_ubuf, (void *)fis_dma_obj.buffer,
4078 4078 fis_xferlen, mode)) {
4079 4079 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4080 4080 "copy from user space failed\n"));
4081 4081 return (1);
4082 4082 }
4083 4083 }
4084 4084
4085 4085 if (data_xferlen) {
4086 4086 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p "
4087 4087 "data_xferlen = %x", data_ubuf, data_xferlen));
4088 4088
4089 4089 /* means IOCTL requires DMA */
4090 4090 /* allocate the data transfer buffer */
4091 4091 data_dma_obj.size = data_xferlen;
4092 4092 data_dma_obj.dma_attr = megasas_generic_dma_attr;
4093 4093 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4094 4094 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4095 4095 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
4096 4096 data_dma_obj.dma_attr.dma_attr_align = 1;
4097 4097
4098 4098 /* allocate kernel buffer for DMA */
4099 4099 if (mega_alloc_dma_obj(instance, &data_dma_obj) != 1) {
4100 4100 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4101 4101 "could not data transfer buffer alloc."));
4102 4102 return (DDI_FAILURE);
4103 4103 }
4104 4104
4105 4105 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
4106 4106 if (ddi_copyin(data_ubuf, (void *) data_dma_obj.buffer,
4107 4107 data_xferlen, mode)) {
4108 4108 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4109 4109 "copy from user space failed\n"));
4110 4110 return (1);
4111 4111 }
4112 4112 }
4113 4113
4114 4114 stp->cmd = kstp->cmd;
4115 4115 stp->cmd_status = kstp->cmd_status;
4116 4116 stp->connection_status = kstp->connection_status;
4117 4117 stp->target_id = kstp->target_id;
4118 4118 stp->sge_count = kstp->sge_count;
4119 4119 /* stp->context = kstp->context; */
4120 4120 stp->timeout = kstp->timeout;
4121 4121 stp->data_xfer_len = kstp->data_xfer_len;
4122 4122
4123 4123 bcopy((void *)kstp->fis, (void *)stp->fis, 10);
4124 4124
4125 4125 stp->flags = kstp->flags & ~MFI_FRAME_SGL64;
4126 4126 stp->stp_flags = kstp->stp_flags;
4127 4127 stp->sgl.sge32[0].length = fis_xferlen;
4128 4128 stp->sgl.sge32[0].phys_addr = fis_dma_obj.dma_cookie[0].dmac_address;
4129 4129 stp->sgl.sge32[1].length = data_xferlen;
4130 4130 stp->sgl.sge32[1].phys_addr = data_dma_obj.dma_cookie[0].dmac_address;
4131 4131
4132 4132 cmd->sync_cmd = MEGASAS_TRUE;
4133 4133 cmd->frame_count = 1;
4134 4134
4135 4135 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4136 4136 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed\n"));
4137 4137 } else {
4138 4138
4139 4139 if (fis_xferlen) {
4140 4140 if (ddi_copyout(fis_dma_obj.buffer, fis_ubuf,
4141 4141 fis_xferlen, mode)) {
4142 4142 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4143 4143 "copy to user space failed\n"));
4144 4144 return (1);
4145 4145 }
4146 4146 }
4147 4147
4148 4148 if (data_xferlen) {
4149 4149 if (ddi_copyout(data_dma_obj.buffer, data_ubuf,
4150 4150 data_xferlen, mode)) {
4151 4151 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4152 4152 "copy to user space failed\n"));
4153 4153 return (1);
4154 4154 }
4155 4155 }
4156 4156 }
4157 4157
4158 4158 kstp->cmd_status = stp->cmd_status;
4159 4159
4160 4160 if (fis_xferlen) {
4161 4161 /* free kernel buffer */
4162 4162 if (mega_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
4163 4163 return (1);
4164 4164 }
4165 4165
4166 4166 if (data_xferlen) {
4167 4167 /* free kernel buffer */
4168 4168 if (mega_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
4169 4169 return (1);
4170 4170 }
4171 4171
4172 4172 return (0);
4173 4173 }
4174 4174
4175 4175 /*
4176 4176 * fill_up_drv_ver
4177 4177 */
4178 4178 static void
4179 4179 fill_up_drv_ver(struct megasas_drv_ver *dv)
4180 4180 {
4181 4181 (void) memset(dv, 0, sizeof (struct megasas_drv_ver));
4182 4182
4183 4183 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
4184 4184 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
4185 4185 (void) memcpy(dv->drv_name, "megaraid_sas", strlen("megaraid_sas"));
4186 4186 (void) memcpy(dv->drv_ver, MEGASAS_VERSION, strlen(MEGASAS_VERSION));
4187 4187 (void) memcpy(dv->drv_rel_date, MEGASAS_RELDATE,
4188 4188 strlen(MEGASAS_RELDATE));
4189 4189 }
4190 4190
4191 4191 /*
4192 4192 * handle_drv_ioctl
4193 4193 */
4194 4194 static int
4195 4195 handle_drv_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4196 4196 int mode)
4197 4197 {
4198 4198 int i;
4199 4199 int rval = 0;
4200 4200 int *props = NULL;
4201 4201 void *ubuf;
4202 4202
4203 4203 uint8_t *pci_conf_buf;
4204 4204 uint32_t xferlen;
4205 4205 uint32_t num_props;
4206 4206 uint_t model;
4207 4207 struct megasas_dcmd_frame *kdcmd;
4208 4208 struct megasas_drv_ver dv;
4209 4209 struct megasas_pci_information pi;
4210 4210
4211 4211 kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0];
4212 4212
4213 4213 model = ddi_model_convert_from(mode & FMODELS);
4214 4214 if (model == DDI_MODEL_ILP32) {
4215 4215 con_log(CL_ANN1, (CE_NOTE,
4216 4216 "handle_drv_ioctl: DDI_MODEL_ILP32"));
4217 4217
4218 4218 xferlen = kdcmd->sgl.sge32[0].length;
4219 4219
4220 4220 /* SJ! - ubuf needs to be virtual address. */
4221 4221 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
4222 4222 } else {
4223 4223 #ifdef _ILP32
4224 4224 con_log(CL_ANN1, (CE_NOTE,
4225 4225 "handle_drv_ioctl: DDI_MODEL_ILP32"));
4226 4226 xferlen = kdcmd->sgl.sge32[0].length;
4227 4227 /* SJ! - ubuf needs to be virtual address. */
4228 4228 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
4229 4229 #else
4230 4230 con_log(CL_ANN1, (CE_NOTE,
4231 4231 "handle_drv_ioctl: DDI_MODEL_LP64"));
4232 4232 xferlen = kdcmd->sgl.sge64[0].length;
4233 4233 /* SJ! - ubuf needs to be virtual address. */
4234 4234 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
4235 4235 #endif
4236 4236 }
4237 4237 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4238 4238 "dataBuf=%p size=%d bytes", ubuf, xferlen));
4239 4239
4240 4240 switch (kdcmd->opcode) {
4241 4241 case MR_DRIVER_IOCTL_DRIVER_VERSION:
4242 4242 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4243 4243 "MR_DRIVER_IOCTL_DRIVER_VERSION"));
4244 4244
4245 4245 fill_up_drv_ver(&dv);
4246 4246
4247 4247 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
4248 4248 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4249 4249 "MR_DRIVER_IOCTL_DRIVER_VERSION : "
4250 4250 "copy to user space failed\n"));
4251 4251 kdcmd->cmd_status = 1;
4252 4252 rval = 1;
4253 4253 } else {
4254 4254 kdcmd->cmd_status = 0;
4255 4255 }
4256 4256 break;
4257 4257 case MR_DRIVER_IOCTL_PCI_INFORMATION:
4258 4258 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4259 4259 "MR_DRIVER_IOCTL_PCI_INFORMAITON"));
4260 4260
4261 4261 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
4262 4262 0, "reg", &props, &num_props)) {
4263 4263 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4264 4264 "MR_DRIVER_IOCTL_PCI_INFORMATION : "
4265 4265 "ddi_prop_look_int_array failed\n"));
4266 4266 rval = 1;
4267 4267 } else {
4268 4268
4269 4269 pi.busNumber = (props[0] >> 16) & 0xFF;
4270 4270 pi.deviceNumber = (props[0] >> 11) & 0x1f;
4271 4271 pi.functionNumber = (props[0] >> 8) & 0x7;
4272 4272 ddi_prop_free((void *)props);
4273 4273 }
4274 4274
4275 4275 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
4276 4276
4277 4277 for (i = 0; i < (sizeof (struct megasas_pci_information) -
4278 4278 offsetof(struct megasas_pci_information, pciHeaderInfo));
4279 4279 i++) {
4280 4280 pci_conf_buf[i] =
4281 4281 pci_config_get8(instance->pci_handle, i);
4282 4282 }
4283 4283
4284 4284 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
4285 4285 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4286 4286 "MR_DRIVER_IOCTL_PCI_INFORMATION : "
4287 4287 "copy to user space failed\n"));
4288 4288 kdcmd->cmd_status = 1;
4289 4289 rval = 1;
4290 4290 } else {
4291 4291 kdcmd->cmd_status = 0;
4292 4292 }
4293 4293 break;
4294 4294 default:
4295 4295 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4296 4296 "invalid driver specific IOCTL opcode = 0x%x",
4297 4297 kdcmd->opcode));
4298 4298 kdcmd->cmd_status = 1;
4299 4299 rval = 1;
4300 4300 break;
4301 4301 }
4302 4302
4303 4303 return (rval);
4304 4304 }
4305 4305
4306 4306 /*
4307 4307 * handle_mfi_ioctl
4308 4308 */
4309 4309 static int
4310 4310 handle_mfi_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4311 4311 int mode)
4312 4312 {
4313 4313 int rval = 0;
4314 4314
4315 4315 struct megasas_header *hdr;
4316 4316 struct megasas_cmd *cmd;
4317 4317
4318 4318 cmd = get_mfi_pkt(instance);
4319 4319
4320 4320 if (!cmd) {
4321 4321 con_log(CL_ANN, (CE_WARN, "megasas: "
4322 4322 "failed to get a cmd packet\n"));
4323 4323 return (1);
4324 4324 }
4325 4325
4326 4326 hdr = (struct megasas_header *)&ioctl->frame[0];
4327 4327
4328 4328 switch (hdr->cmd) {
4329 4329 case MFI_CMD_OP_DCMD:
4330 4330 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
4331 4331 break;
4332 4332 case MFI_CMD_OP_SMP:
4333 4333 rval = issue_mfi_smp(instance, ioctl, cmd, mode);
4334 4334 break;
4335 4335 case MFI_CMD_OP_STP:
4336 4336 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
4337 4337 break;
4338 4338 case MFI_CMD_OP_LD_SCSI:
4339 4339 case MFI_CMD_OP_PD_SCSI:
4340 4340 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
4341 4341 break;
4342 4342 default:
4343 4343 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
4344 4344 "invalid mfi ioctl hdr->cmd = %d\n", hdr->cmd));
4345 4345 rval = 1;
4346 4346 break;
4347 4347 }
4348 4348
4349 4349
4350 4350 return_mfi_pkt(instance, cmd);
4351 4351 if (megasas_common_check(instance, cmd) != DDI_SUCCESS)
4352 4352 rval = 1;
4353 4353 return (rval);
4354 4354 }
4355 4355
4356 4356 /*
4357 4357 * AEN
4358 4358 */
4359 4359 static int
4360 4360 handle_mfi_aen(struct megasas_instance *instance, struct megasas_aen *aen)
4361 4361 {
4362 4362 int rval = 0;
4363 4363
4364 4364 rval = register_mfi_aen(instance, instance->aen_seq_num,
4365 4365 aen->class_locale_word);
4366 4366
4367 4367 aen->cmd_status = (uint8_t)rval;
4368 4368
4369 4369 return (rval);
4370 4370 }
4371 4371
/*
 * register_mfi_aen - (re)register an AEN subscription with the firmware.
 *
 * seq_num is the event sequence number to wait from; class_locale_word
 * packs the event class and locale bitmap the caller wants.  Returns 0 on
 * success, -ENOMEM if no command packet is available, or the abort status
 * if a superseded registration could not be cancelled.
 *
 * NOTE(review): the command is issued asynchronously and left outstanding
 * in instance->aen_cmd; ordering of the abort / re-issue steps below is
 * significant and must not be rearranged.
 */
static int
register_mfi_aen(struct megasas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int	ret_val;

	struct megasas_cmd		*cmd;
	struct megasas_dcmd_frame	*dcmd;
	union megasas_evt_class_locale	curr_aen;
	union megasas_evt_class_locale	prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	curr_aen.word = class_locale_word;

	if (instance->aen_cmd) {
		/* class/locale of the registration currently in flight */
		prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */

			return (0);
		} else {
			/* build the superset: union of locales, min class */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			ret_val = abort_aen_cmd(instance, instance->aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort prevous AEN command\n"));

				return (ret_val);
			}
		}
	} else {
		/* no pending AEN: register exactly what the caller asked */
		curr_aen.word = class_locale_word;
	}

	cmd = get_mfi_pkt(instance);

	if (!cmd)
		return (-ENOMEM);

	dcmd = &cmd->frame->dcmd;

	/* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* clear the buffer the FW will DMA the next event detail into */
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct megasas_evt_detail));

	/* Prepare DCMD for aen registration */
	dcmd->cmd = MFI_CMD_OP_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = sizeof (struct megasas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr =
	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address;
	dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_detail);

	instance->aen_seq_num = seq_num;

	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	instance->aen_cmd = cmd;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	/* atomic_add_16 (&instance->fw_outstanding, 1); */
	instance->func_ptr->issue_cmd(cmd, instance);

	return (0);
}
4483 4483
4484 4484 static void
4485 4485 display_scsi_inquiry(caddr_t scsi_inq)
4486 4486 {
4487 4487 #define MAX_SCSI_DEVICE_CODE 14
4488 4488 int i;
4489 4489 char inquiry_buf[256] = {0};
4490 4490 int len;
4491 4491 const char *const scsi_device_types[] = {
4492 4492 "Direct-Access ",
4493 4493 "Sequential-Access",
4494 4494 "Printer ",
4495 4495 "Processor ",
4496 4496 "WORM ",
4497 4497 "CD-ROM ",
4498 4498 "Scanner ",
4499 4499 "Optical Device ",
4500 4500 "Medium Changer ",
4501 4501 "Communications ",
4502 4502 "Unknown ",
4503 4503 "Unknown ",
4504 4504 "Unknown ",
4505 4505 "Enclosure ",
4506 4506 };
4507 4507
4508 4508 len = 0;
4509 4509
4510 4510 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: ");
4511 4511 for (i = 8; i < 16; i++) {
4512 4512 len += snprintf(inquiry_buf + len, 265 - len, "%c",
4513 4513 scsi_inq[i]);
4514 4514 }
4515 4515
4516 4516 len += snprintf(inquiry_buf + len, 265 - len, " Model: ");
4517 4517
4518 4518 for (i = 16; i < 32; i++) {
4519 4519 len += snprintf(inquiry_buf + len, 265 - len, "%c",
4520 4520 scsi_inq[i]);
4521 4521 }
4522 4522
4523 4523 len += snprintf(inquiry_buf + len, 265 - len, " Rev: ");
4524 4524
4525 4525 for (i = 32; i < 36; i++) {
4526 4526 len += snprintf(inquiry_buf + len, 265 - len, "%c",
4527 4527 scsi_inq[i]);
4528 4528 }
4529 4529
4530 4530 len += snprintf(inquiry_buf + len, 265 - len, "\n");
4531 4531
4532 4532
4533 4533 i = scsi_inq[0] & 0x1f;
4534 4534
4535 4535
4536 4536 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ",
4537 4537 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
4538 4538 "Unknown ");
4539 4539
4540 4540
4541 4541 len += snprintf(inquiry_buf + len, 265 - len,
4542 4542 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
4543 4543
4544 4544 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
4545 4545 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n");
4546 4546 } else {
4547 4547 len += snprintf(inquiry_buf + len, 265 - len, "\n");
4548 4548 }
4549 4549
4550 4550 con_log(CL_ANN1, (CE_CONT, inquiry_buf));
4551 4551 }
4552 4552
/*
 * read_fw_status_reg_xscale - return the firmware status word for
 * xscale-based controllers, which publish it in outbound message
 * register 0.
 */
static int
read_fw_status_reg_xscale(struct megasas_instance *instance)
{
	return ((int)RD_OB_MSG_0(instance));
}
↓ open down ↓ |
4557 lines elided |
↑ open up ↑ |
4558 4558
/*
 * read_fw_status_reg_ppc - return the firmware status word for ppc-based
 * controllers, which publish it in outbound scratch pad register 0.
 */
static int
read_fw_status_reg_ppc(struct megasas_instance *instance)
{
	return ((int)RD_OB_SCRATCH_PAD_0(instance));
}
4564 4564
/*
 * issue_cmd_xscale - fire a prepared command at xscale-based firmware.
 * Bumps the outstanding-command counter, then posts the frame address
 * (shifted per the xscale queue-port encoding) and extra-frame count to
 * the inbound queue port.
 */
static void
issue_cmd_xscale(struct megasas_cmd *cmd, struct megasas_instance *instance)
{
	atomic_inc_16(&instance->fw_outstanding);

	/* Issue the command to the FW */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
	    (cmd->frame_count - 1), instance);
}
4574 4574
/*
 * issue_cmd_ppc - fire a prepared command at ppc-based firmware.
 * Bumps the outstanding-command counter, then posts the frame address and
 * the ppc queue-port encoding of the extra-frame count to the inbound
 * queue port.
 */
static void
issue_cmd_ppc(struct megasas_cmd *cmd, struct megasas_instance *instance)
{
	atomic_inc_16(&instance->fw_outstanding);

	/* Issue the command to the FW */
	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
}
4584 4584
/*
 * issue_cmd_in_sync_mode
 */
/*
 * issue_cmd_in_sync_mode_xscale - post a command to xscale firmware and
 * block until the interrupt handler signals completion (cmd_status leaves
 * its ENODATA sentinel).  Returns 0 on completion, 1 on timeout.
 *
 * NOTE(review): the loop counts cv_wait() wakeups against `msecs', but
 * cv_wait() has no timeout, so each iteration is a signal, not a
 * millisecond — the bound is not a real time limit; confirm whether
 * cv_timedwait() was intended.
 */
static int
issue_cmd_in_sync_mode_xscale(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int		i;
	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);

	/* sentinel: completion handler overwrites this with the FW status */
	cmd->cmd_status = ENODATA;

	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
	    (cmd->frame_count - 1), instance);

	mutex_enter(&instance->int_cmd_mtx);

	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}

	mutex_exit(&instance->int_cmd_mtx);

	if (i < (msecs -1)) {
		return (0);
	} else {
		return (1);
	}
}
4614 4614
/*
 * issue_cmd_in_sync_mode_ppc - post a command to ppc firmware and block
 * until the interrupt handler signals completion (cmd_status leaves its
 * ENODATA sentinel).  Returns 0 on completion, 1 on timeout.
 *
 * NOTE(review): as in the xscale variant, the loop counts cv_wait()
 * wakeups against `msecs' even though cv_wait() has no timeout — the
 * bound is not a real time limit; confirm whether cv_timedwait() was
 * intended.
 */
static int
issue_cmd_in_sync_mode_ppc(struct megasas_instance *instance,
    struct megasas_cmd *cmd)
{
	int		i;
	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called\n"));

	/* sentinel: completion handler overwrites this with the FW status */
	cmd->cmd_status = ENODATA;

	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);

	mutex_enter(&instance->int_cmd_mtx);

	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}

	mutex_exit(&instance->int_cmd_mtx);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done\n"));

	if (i < (msecs -1)) {
		return (0);
	} else {
		return (1);
	}
}
4645 4645
4646 4646 /*
4647 4647 * issue_cmd_in_poll_mode
4648 4648 */
4649 4649 static int
4650 4650 issue_cmd_in_poll_mode_xscale(struct megasas_instance *instance,
4651 4651 struct megasas_cmd *cmd)
4652 4652 {
4653 4653 int i;
4654 4654 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
4655 4655 struct megasas_header *frame_hdr;
4656 4656
4657 4657 frame_hdr = (struct megasas_header *)cmd->frame;
4658 4658 frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
4659 4659 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
4660 4660
4661 4661 /* issue the frame using inbound queue port */
4662 4662 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4663 4663 (cmd->frame_count - 1), instance);
4664 4664
4665 4665 /* wait for cmd_status to change from 0xFF */
4666 4666 for (i = 0; i < msecs && (frame_hdr->cmd_status ==
4667 4667 MFI_CMD_STATUS_POLL_MODE); i++) {
4668 4668 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
4669 4669 }
4670 4670
4671 4671 if (frame_hdr->cmd_status == MFI_CMD_STATUS_POLL_MODE) {
4672 4672 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
4673 4673 "cmd polling timed out"));
4674 4674 return (DDI_FAILURE);
4675 4675 }
4676 4676
4677 4677 return (DDI_SUCCESS);
4678 4678 }
4679 4679
4680 4680 static int
4681 4681 issue_cmd_in_poll_mode_ppc(struct megasas_instance *instance,
4682 4682 struct megasas_cmd *cmd)
4683 4683 {
4684 4684 int i;
4685 4685 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
4686 4686 struct megasas_header *frame_hdr;
4687 4687
4688 4688 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called\n"));
4689 4689
4690 4690 frame_hdr = (struct megasas_header *)cmd->frame;
4691 4691 frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
4692 4692 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
4693 4693
4694 4694 /* issue the frame using inbound queue port */
4695 4695 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4696 4696 (((cmd->frame_count - 1) << 1) | 1), instance);
4697 4697
4698 4698 /* wait for cmd_status to change from 0xFF */
4699 4699 for (i = 0; i < msecs && (frame_hdr->cmd_status ==
4700 4700 MFI_CMD_STATUS_POLL_MODE); i++) {
4701 4701 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
4702 4702 }
4703 4703
4704 4704 if (frame_hdr->cmd_status == MFI_CMD_STATUS_POLL_MODE) {
4705 4705 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
4706 4706 "cmd polling timed out"));
4707 4707 return (DDI_FAILURE);
4708 4708 }
4709 4709
4710 4710 return (DDI_SUCCESS);
4711 4711 }
4712 4712
/*
 * enable_intr_xscale:
 * Unmask controller interrupts on xscale-class controllers via the
 * generic MFI_ENABLE_INTR register macro.
 */
static void
enable_intr_xscale(struct megasas_instance *instance)
{
	MFI_ENABLE_INTR(instance);
}
4718 4718
4719 4719 static void
4720 4720 enable_intr_ppc(struct megasas_instance *instance)
4721 4721 {
4722 4722 uint32_t mask;
4723 4723
4724 4724 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called\n"));
4725 4725
4726 4726 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
4727 4727 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
4728 4728
4729 4729 /*
4730 4730 * As 1078DE is same as 1078 chip, the interrupt mask
4731 4731 * remains the same.
4732 4732 */
4733 4733 /* WR_OB_INTR_MASK(~0x80000000, instance); */
4734 4734 WR_OB_INTR_MASK(~(MFI_REPLY_1078_MESSAGE_INTR), instance);
4735 4735
4736 4736 /* dummy read to force PCI flush */
4737 4737 mask = RD_OB_INTR_MASK(instance);
4738 4738
4739 4739 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
4740 4740 "outbound_intr_mask = 0x%x\n", mask));
4741 4741 }
4742 4742
/*
 * disable_intr_xscale:
 * Mask controller interrupts on xscale-class controllers via the
 * generic MFI_DISABLE_INTR register macro.
 */
static void
disable_intr_xscale(struct megasas_instance *instance)
{
	MFI_DISABLE_INTR(instance);
}
4748 4748
4749 4749 static void
4750 4750 disable_intr_ppc(struct megasas_instance *instance)
4751 4751 {
4752 4752 uint32_t mask;
4753 4753
4754 4754 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called\n"));
4755 4755
4756 4756 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
4757 4757 "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));
4758 4758
4759 4759 /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
4760 4760 WR_OB_INTR_MASK(OB_INTR_MASK, instance);
4761 4761
4762 4762 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
4763 4763 "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));
4764 4764
4765 4765 /* dummy read to force PCI flush */
4766 4766 mask = RD_OB_INTR_MASK(instance);
4767 4767 #ifdef lint
4768 4768 mask = mask;
4769 4769 #endif
4770 4770 }
4771 4771
4772 4772 static int
4773 4773 intr_ack_xscale(struct megasas_instance *instance)
4774 4774 {
4775 4775 uint32_t status;
4776 4776
4777 4777 /* check if it is our interrupt */
4778 4778 status = RD_OB_INTR_STATUS(instance);
4779 4779
4780 4780 if (!(status & MFI_OB_INTR_STATUS_MASK)) {
4781 4781 return (DDI_INTR_UNCLAIMED);
4782 4782 }
4783 4783
4784 4784 /* clear the interrupt by writing back the same value */
4785 4785 WR_OB_INTR_STATUS(status, instance);
4786 4786
4787 4787 return (DDI_INTR_CLAIMED);
4788 4788 }
4789 4789
4790 4790 static int
4791 4791 intr_ack_ppc(struct megasas_instance *instance)
4792 4792 {
4793 4793 uint32_t status;
4794 4794
4795 4795 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called\n"));
4796 4796
4797 4797 /* check if it is our interrupt */
4798 4798 status = RD_OB_INTR_STATUS(instance);
4799 4799
4800 4800 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x\n", status));
4801 4801
4802 4802 /*
4803 4803 * As 1078DE is same as 1078 chip, the status field
4804 4804 * remains the same.
4805 4805 */
4806 4806 if (!(status & MFI_REPLY_1078_MESSAGE_INTR)) {
4807 4807 return (DDI_INTR_UNCLAIMED);
4808 4808 }
4809 4809
4810 4810 /* clear the interrupt by writing back the same value */
4811 4811 WR_OB_DOORBELL_CLEAR(status, instance);
4812 4812
4813 4813 /* dummy READ */
4814 4814 status = RD_OB_INTR_STATUS(instance);
4815 4815
4816 4816 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared\n"));
4817 4817
4818 4818 return (DDI_INTR_CLAIMED);
4819 4819 }
4820 4820
4821 4821 static int
4822 4822 megasas_common_check(struct megasas_instance *instance,
4823 4823 struct megasas_cmd *cmd)
4824 4824 {
4825 4825 int ret = DDI_SUCCESS;
4826 4826
4827 4827 if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
4828 4828 DDI_SUCCESS) {
4829 4829 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4830 4830 if (cmd->pkt != NULL) {
4831 4831 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4832 4832 cmd->pkt->pkt_statistics = 0;
4833 4833 }
4834 4834 ret = DDI_FAILURE;
4835 4835 }
4836 4836 if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
4837 4837 != DDI_SUCCESS) {
4838 4838 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4839 4839 if (cmd->pkt != NULL) {
4840 4840 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4841 4841 cmd->pkt->pkt_statistics = 0;
4842 4842 }
4843 4843 ret = DDI_FAILURE;
4844 4844 }
4845 4845 if (megasas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
4846 4846 DDI_SUCCESS) {
4847 4847 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4848 4848 if (cmd->pkt != NULL) {
4849 4849 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4850 4850 cmd->pkt->pkt_statistics = 0;
4851 4851 }
4852 4852 ret = DDI_FAILURE;
4853 4853 }
4854 4854 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
4855 4855 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4856 4856 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
4857 4857 if (cmd->pkt != NULL) {
4858 4858 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4859 4859 cmd->pkt->pkt_statistics = 0;
4860 4860 }
4861 4861 ret = DDI_FAILURE;
4862 4862 }
4863 4863
4864 4864 return (ret);
4865 4865 }
4866 4866
/*
 * megasas_fm_error_cb:
 * FMA error-callback registered via ddi_fm_handler_register().  Posts a
 * PCI ereport for the error and returns the framework's fme_status.
 */
/*ARGSUSED*/
static int
megasas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
4878 4878
4879 4879 static void
4880 4880 megasas_fm_init(struct megasas_instance *instance)
4881 4881 {
4882 4882 /* Need to change iblock to priority for new MSI intr */
4883 4883 ddi_iblock_cookie_t fm_ibc;
4884 4884
4885 4885 /* Only register with IO Fault Services if we have some capability */
4886 4886 if (instance->fm_capabilities) {
4887 4887 /* Adjust access and dma attributes for FMA */
4888 4888 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
4889 4889 megasas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
4890 4890
4891 4891 /*
4892 4892 * Register capabilities with IO Fault Services.
4893 4893 * fm_capabilities will be updated to indicate
4894 4894 * capabilities actually supported (not requested.)
4895 4895 */
4896 4896
4897 4897 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);
4898 4898
4899 4899 /*
4900 4900 * Initialize pci ereport capabilities if ereport
4901 4901 * capable (should always be.)
4902 4902 */
4903 4903
4904 4904 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
4905 4905 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4906 4906 pci_ereport_setup(instance->dip);
4907 4907 }
4908 4908
4909 4909 /*
4910 4910 * Register error callback if error callback capable.
4911 4911 */
4912 4912 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4913 4913 ddi_fm_handler_register(instance->dip,
4914 4914 megasas_fm_error_cb, (void*) instance);
4915 4915 }
4916 4916 } else {
4917 4917 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4918 4918 megasas_generic_dma_attr.dma_attr_flags = 0;
4919 4919 }
4920 4920 }
4921 4921
4922 4922 static void
4923 4923 megasas_fm_fini(struct megasas_instance *instance)
4924 4924 {
4925 4925 /* Only unregister FMA capabilities if registered */
4926 4926 if (instance->fm_capabilities) {
4927 4927 /*
4928 4928 * Un-register error callback if error callback capable.
4929 4929 */
4930 4930 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4931 4931 ddi_fm_handler_unregister(instance->dip);
4932 4932 }
4933 4933
4934 4934 /*
4935 4935 * Release any resources allocated by pci_ereport_setup()
4936 4936 */
4937 4937 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
4938 4938 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4939 4939 pci_ereport_teardown(instance->dip);
4940 4940 }
4941 4941
4942 4942 /* Unregister from IO Fault Services */
4943 4943 ddi_fm_fini(instance->dip);
4944 4944
4945 4945 /* Adjust access and dma attributes for FMA */
4946 4946 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4947 4947 megasas_generic_dma_attr.dma_attr_flags = 0;
4948 4948 }
4949 4949 }
4950 4950
4951 4951 int
4952 4952 megasas_check_acc_handle(ddi_acc_handle_t handle)
4953 4953 {
4954 4954 ddi_fm_error_t de;
4955 4955
4956 4956 if (handle == NULL) {
4957 4957 return (DDI_FAILURE);
4958 4958 }
4959 4959
4960 4960 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
4961 4961
4962 4962 return (de.fme_status);
4963 4963 }
4964 4964
4965 4965 int
4966 4966 megasas_check_dma_handle(ddi_dma_handle_t handle)
4967 4967 {
4968 4968 ddi_fm_error_t de;
4969 4969
4970 4970 if (handle == NULL) {
4971 4971 return (DDI_FAILURE);
4972 4972 }
4973 4973
4974 4974 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
4975 4975
4976 4976 return (de.fme_status);
4977 4977 }
4978 4978
4979 4979 void
4980 4980 megasas_fm_ereport(struct megasas_instance *instance, char *detail)
4981 4981 {
4982 4982 uint64_t ena;
4983 4983 char buf[FM_MAX_CLASS];
4984 4984
4985 4985 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
4986 4986 ena = fm_ena_generate(0, FM_ENA_FMT1);
4987 4987 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
4988 4988 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
4989 4989 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
4990 4990 }
4991 4991 }
↓ open down ↓ |
403 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX