XXXX introduce drv_sectohz
--- old/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c
+++ new/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c
1 1 /*
2 2 * mr_sas_tbolt.c: source for mr_sas driver for New Generation.
3 3 * i.e. Thunderbolt and Invader
4 4 *
5 5 * Solaris MegaRAID device driver for SAS2.0 controllers
6 6 * Copyright (c) 2008-2012, LSI Logic Corporation.
7 7 * All rights reserved.
8 8 *
9 9 * Version:
10 10 * Author:
11 11 * Swaminathan K S
12 12 * Arun Chandrashekhar
13 13 * Manju R
14 14 * Rasheed
15 15 * Shakeel Bukhari
16 16 */
17 17
18 18 /*
19 19 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
20 20 * Copyright 2015 Citrus IT Limited. All rights reserved.
21 21 */
22 22
23 23
24 24 #include <sys/types.h>
25 25 #include <sys/file.h>
26 26 #include <sys/atomic.h>
27 27 #include <sys/scsi/scsi.h>
28 28 #include <sys/byteorder.h>
29 29 #include <sys/sdt.h>
30 30 #include "ld_pd_map.h"
31 31 #include "mr_sas.h"
32 32 #include "fusion.h"
33 33
34 34 /*
35 35 * FMA header files
36 36 */
37 37 #include <sys/ddifm.h>
38 38 #include <sys/fm/protocol.h>
39 39 #include <sys/fm/util.h>
40 40 #include <sys/fm/io/ddi.h>
41 41
42 42
43 43 /* Pre-TB command size and TB command size. */
44 44 #define MR_COMMAND_SIZE (64*20) /* 1280 bytes */
45 45 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
46 46 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
47 47 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
48 48 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
49 49 extern ddi_dma_attr_t mrsas_generic_dma_attr;
50 50 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
51 51 extern struct ddi_device_acc_attr endian_attr;
52 52 extern int debug_level_g;
53 53 extern unsigned int enable_fp;
54 54 volatile int dump_io_wait_time = 90;
55 55 extern volatile int debug_timeout_g;
56 56 extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
57 57 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
58 58 extern void push_pending_mfi_pkt(struct mrsas_instance *,
59 59 struct mrsas_cmd *);
60 60 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
61 61 MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
62 62
63 63 /* Local static prototypes. */
64 64 static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
65 65 struct scsi_address *, struct scsi_pkt *, uchar_t *);
66 66 static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
67 67 U64 start_blk, U32 num_blocks);
68 68 static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
69 69 static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
70 70 static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
71 71 static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
72 72 #ifdef PDSUPPORT
73 73 static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
74 74 struct mrsas_tbolt_pd_info *, int);
75 75 #endif /* PDSUPPORT */
76 76
77 77 static int debug_tbolt_fw_faults_after_ocr_g = 0;
78 78
79 79 /*
80 80 * destroy_mfi_mpi_frame_pool
81 81 */
82 82 void
83 83 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
84 84 {
85 85 int i;
86 86
87 87 struct mrsas_cmd *cmd;
88 88
89 89 /* return all mfi frames to pool */
90 90 for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
91 91 cmd = instance->cmd_list[i];
92 92 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
93 93 (void) mrsas_free_dma_obj(instance,
94 94 cmd->frame_dma_obj);
95 95 }
96 96 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
97 97 }
98 98 }
99 99
100 100 /*
101 101 * destroy_mpi2_frame_pool
102 102 */
103 103 void
104 104 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
105 105 {
106 106
107 107 if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
108 108 (void) mrsas_free_dma_obj(instance,
109 109 instance->mpi2_frame_pool_dma_obj);
110 110 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
111 111 }
112 112 }
113 113
114 114
115 115 /*
116 116 * mrsas_tbolt_free_additional_dma_buffer
117 117 */
118 118 void
119 119 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
120 120 {
121 121 int i;
122 122
123 123 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
124 124 (void) mrsas_free_dma_obj(instance,
125 125 instance->mfi_internal_dma_obj);
126 126 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
127 127 }
128 128 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
129 129 (void) mrsas_free_dma_obj(instance,
130 130 instance->mfi_evt_detail_obj);
131 131 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
132 132 }
133 133
134 134 for (i = 0; i < 2; i++) {
135 135 if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
136 136 (void) mrsas_free_dma_obj(instance,
137 137 instance->ld_map_obj[i]);
138 138 instance->ld_map_obj[i].status = DMA_OBJ_FREED;
139 139 }
140 140 }
141 141 }
142 142
143 143
144 144 /*
145 145 * free_req_desc_pool
146 146 */
147 147 void
148 148 free_req_rep_desc_pool(struct mrsas_instance *instance)
149 149 {
150 150 if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
151 151 (void) mrsas_free_dma_obj(instance,
152 152 instance->request_desc_dma_obj);
153 153 instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
154 154 }
155 155
156 156 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
157 157 (void) mrsas_free_dma_obj(instance,
158 158 instance->reply_desc_dma_obj);
159 159 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
160 160 }
161 161
162 162
163 163 }
164 164
165 165
166 166 /*
167 167 * ThunderBolt(TB) Request Message Frame Pool
168 168 */
169 169 int
170 170 create_mpi2_frame_pool(struct mrsas_instance *instance)
171 171 {
172 172 int i = 0;
173 173 uint16_t max_cmd;
174 174 uint32_t sgl_sz;
175 175 uint32_t raid_msg_size;
176 176 uint32_t total_size;
177 177 uint32_t offset;
178 178 uint32_t io_req_base_phys;
179 179 uint8_t *io_req_base;
180 180 struct mrsas_cmd *cmd;
181 181
182 182 max_cmd = instance->max_fw_cmds;
183 183
184 184 sgl_sz = 1024;
185 185 raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
186 186
187 187 	/* Allocating additional 256 bytes to accommodate SMID 0. */
188 188 total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
189 189 (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
190 190
191 191 con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
192 192 "max_cmd %x", max_cmd));
193 193
194 194 con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
195 195 "request message frame pool size %x", total_size));
196 196
197 197 /*
198 198 * ThunderBolt(TB) We need to create a single chunk of DMA'ble memory
199 199 	 * and then split the memory into 1024 commands. Each command should be
200 200 	 * able to contain a RAID MESSAGE FRAME which will embed an MFI_FRAME
201 201 	 * within it. For a clue, refer to the "alloc_req_rep_desc" function,
202 202 	 * where we allocate the request/reply descriptor queues.
203 203 */
204 204
205 205 instance->mpi2_frame_pool_dma_obj.size = total_size;
206 206 instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
207 207 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
208 208 0xFFFFFFFFU;
209 209 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
210 210 0xFFFFFFFFU;
211 211 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
212 212 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
213 213
214 214 if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
215 215 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
216 216 dev_err(instance->dip, CE_WARN,
217 217 "could not alloc mpi2 frame pool");
218 218 return (DDI_FAILURE);
219 219 }
220 220
221 221 bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
222 222 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
223 223
224 224 instance->io_request_frames =
225 225 (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
226 226 instance->io_request_frames_phy =
227 227 (uint32_t)
228 228 instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
229 229
230 230 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
231 231 (void *)instance->io_request_frames));
232 232
233 233 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
234 234 instance->io_request_frames_phy));
235 235
236 236 io_req_base = (uint8_t *)instance->io_request_frames +
237 237 MRSAS_THUNDERBOLT_MSG_SIZE;
238 238 io_req_base_phys = instance->io_request_frames_phy +
239 239 MRSAS_THUNDERBOLT_MSG_SIZE;
240 240
241 241 con_log(CL_DLEVEL3, (CE_NOTE,
242 242 "io req_base_phys 0x%x", io_req_base_phys));
243 243
244 244 for (i = 0; i < max_cmd; i++) {
245 245 cmd = instance->cmd_list[i];
246 246
247 247 offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
248 248
249 249 cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
250 250 ((uint8_t *)io_req_base + offset);
251 251 cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
252 252
253 253 cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
254 254 (max_cmd * raid_msg_size) + i * sgl_sz);
255 255
256 256 cmd->sgl_phys_addr = (io_req_base_phys +
257 257 (max_cmd * raid_msg_size) + i * sgl_sz);
258 258
259 259 cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
260 260 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
261 261 (i * SENSE_LENGTH));
262 262
263 263 cmd->sense_phys_addr1 = (io_req_base_phys +
264 264 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
265 265 (i * SENSE_LENGTH));
266 266
267 267
268 268 cmd->SMID = i + 1;
269 269
270 270 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
271 271 cmd->index, (void *)cmd->scsi_io_request));
272 272
273 273 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
274 274 cmd->index, cmd->scsi_io_request_phys_addr));
275 275
276 276 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
277 277 cmd->index, (void *)cmd->sense1));
278 278
279 279 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
280 280 cmd->index, cmd->sense_phys_addr1));
281 281
282 282 		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers [%x]0x%p",
283 283 cmd->index, (void *)cmd->sgl));
284 284
285 285 		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers phys [%x]0x%x",
286 286 cmd->index, cmd->sgl_phys_addr));
287 287 }
288 288
289 289 return (DDI_SUCCESS);
290 290
291 291 }
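
The carve-up above is easier to verify with the layout written out. A minimal sketch of how the single DMA chunk is addressed per command (MRSAS_THUNDERBOLT_MSG_SIZE, sgl_sz and SENSE_LENGTH are the values used in the code above; treating the message size as 256 bytes follows the driver's own comment and is otherwise an assumption):

	/*
	 * One contiguous chunk, per command i (SMID = i + 1):
	 *
	 *   frame(i) = base + msg_size + i * msg_size     (SMID 0 frame first)
	 *   sgl(i)   = base + msg_size + max_cmd * msg_size + i * sgl_sz
	 *   sense(i) = base + msg_size + max_cmd * (msg_size + sgl_sz)
	 *              + i * SENSE_LENGTH
	 *
	 * which sums to exactly the total_size computed before the allocation.
	 */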
292 292
293 293
294 294 /*
295 295 * alloc_additional_dma_buffer for AEN
296 296 */
297 297 int
298 298 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
299 299 {
300 300 uint32_t internal_buf_size = PAGESIZE*2;
301 301 int i;
302 302
303 303 /* Initialize buffer status as free */
304 304 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
305 305 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
306 306 instance->ld_map_obj[0].status = DMA_OBJ_FREED;
307 307 instance->ld_map_obj[1].status = DMA_OBJ_FREED;
308 308
309 309
310 310 instance->mfi_internal_dma_obj.size = internal_buf_size;
311 311 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
312 312 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
313 313 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
314 314 0xFFFFFFFFU;
315 315 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
316 316
317 317 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
318 318 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
319 319 dev_err(instance->dip, CE_WARN,
320 320 		    "could not alloc mfi internal buffer");
321 321 return (DDI_FAILURE);
322 322 }
323 323
324 324 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
325 325
326 326 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
327 327 instance->internal_buf =
328 328 (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
329 329 instance->internal_buf_dmac_add =
330 330 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
331 331 instance->internal_buf_size = internal_buf_size;
332 332
333 333 /* allocate evt_detail */
334 334 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
335 335 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
336 336 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
337 337 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
338 338 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
339 339 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
340 340
341 341 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
342 342 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
343 343 dev_err(instance->dip, CE_WARN,
344 344 "mrsas_tbolt_alloc_additional_dma_buffer: "
345 345 "could not allocate data transfer buffer.");
346 346 goto fail_tbolt_additional_buff;
347 347 }
348 348
349 349 bzero(instance->mfi_evt_detail_obj.buffer,
350 350 sizeof (struct mrsas_evt_detail));
351 351
352 352 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
353 353
354 354 instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
355 355 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
356 356
357 357 for (i = 0; i < 2; i++) {
358 358 /* allocate the data transfer buffer */
359 359 instance->ld_map_obj[i].size = instance->size_map_info;
360 360 instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
361 361 instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
362 362 instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
363 363 0xFFFFFFFFU;
364 364 instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
365 365 instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
366 366
367 367 if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
368 368 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
369 369 dev_err(instance->dip, CE_WARN,
370 370 "could not allocate data transfer buffer.");
371 371 goto fail_tbolt_additional_buff;
372 372 }
373 373
374 374 instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
375 375
376 376 bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);
377 377
378 378 instance->ld_map[i] =
379 379 (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
380 380 instance->ld_map_phy[i] = (uint32_t)instance->
381 381 ld_map_obj[i].dma_cookie[0].dmac_address;
382 382
383 383 con_log(CL_DLEVEL3, (CE_NOTE,
384 384 "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
385 385
386 386 con_log(CL_DLEVEL3, (CE_NOTE,
387 387 "size_map_info 0x%x", instance->size_map_info));
388 388 }
389 389
390 390 return (DDI_SUCCESS);
391 391
392 392 fail_tbolt_additional_buff:
393 393 mrsas_tbolt_free_additional_dma_buffer(instance);
394 394
395 395 return (DDI_FAILURE);
396 396 }
397 397
398 398 MRSAS_REQUEST_DESCRIPTOR_UNION *
399 399 mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
400 400 {
401 401 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
402 402
403 403 if (index > instance->max_fw_cmds) {
404 404 con_log(CL_ANN1, (CE_NOTE,
405 405 "Invalid SMID 0x%x request for descriptor", index));
406 406 con_log(CL_ANN1, (CE_NOTE,
407 407 "max_fw_cmds : 0x%x", instance->max_fw_cmds));
408 408 return (NULL);
409 409 }
410 410
411 411 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
412 412 ((char *)instance->request_message_pool +
413 413 (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
414 414
415 415 con_log(CL_ANN1, (CE_NOTE,
416 416 "request descriptor : 0x%08lx", (unsigned long)req_desc));
417 417
418 418 con_log(CL_ANN1, (CE_NOTE,
419 419 "request descriptor base phy : 0x%08lx",
420 420 (unsigned long)instance->request_message_pool_phy));
421 421
422 422 return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
423 423 }
424 424
425 425
426 426 /*
427 427 * Allocate Request and Reply Queue Descriptors.
428 428 */
429 429 int
430 430 alloc_req_rep_desc(struct mrsas_instance *instance)
431 431 {
432 432 uint32_t request_q_sz, reply_q_sz;
433 433 int i, max_reply_q_sz;
434 434 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
435 435
436 436 /*
437 437 	 * ThunderBolt(TB) There's no longer a producer-consumer mechanism.
438 438 	 * Once we get an interrupt, we are supposed to scan through the list
439 439 	 * of reply descriptors and process them accordingly. We need to
440 440 	 * allocate memory for 1024 reply descriptors.
441 441 */
442 442
443 443 /* Allocate Reply Descriptors */
444 444 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
445 445 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
446 446
447 447 /* reply queue size should be multiple of 16 */
448 448 max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
449 449
450 450 reply_q_sz = 8 * max_reply_q_sz;
451 451
452 452
453 453 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
454 454 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
455 455
456 456 instance->reply_desc_dma_obj.size = reply_q_sz;
457 457 instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
458 458 instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
459 459 instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
460 460 instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
461 461 instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
462 462
463 463 if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
464 464 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
465 465 dev_err(instance->dip, CE_WARN, "could not alloc reply queue");
466 466 return (DDI_FAILURE);
467 467 }
468 468
469 469 bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
470 470 instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
471 471
472 472 /* virtual address of reply queue */
473 473 instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
474 474 instance->reply_desc_dma_obj.buffer);
475 475
476 476 instance->reply_q_depth = max_reply_q_sz;
477 477
478 478 con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
479 479 instance->reply_q_depth));
480 480
481 481 con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
482 482 (void *)instance->reply_frame_pool));
483 483
484 484 /* initializing reply address to 0xFFFFFFFF */
485 485 reply_desc = instance->reply_frame_pool;
486 486
487 487 for (i = 0; i < instance->reply_q_depth; i++) {
488 488 reply_desc->Words = (uint64_t)~0;
489 489 reply_desc++;
490 490 }
491 491
492 492
493 493 instance->reply_frame_pool_phy =
494 494 (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
495 495
496 496 con_log(CL_ANN1, (CE_NOTE,
497 497 "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
498 498
499 499
500 500 instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
501 501 reply_q_sz);
502 502
503 503 con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
504 504 instance->reply_pool_limit_phy));
505 505
506 506
507 507 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
508 508 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
509 509
510 510 /* Allocate Request Descriptors */
511 511 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
512 512 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
513 513
514 514 request_q_sz = 8 *
515 515 (instance->max_fw_cmds);
516 516
517 517 instance->request_desc_dma_obj.size = request_q_sz;
518 518 instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
519 519 instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
520 520 instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
521 521 0xFFFFFFFFU;
522 522 instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
523 523 instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
524 524
525 525 if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
526 526 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
527 527 dev_err(instance->dip, CE_WARN,
528 528 "could not alloc request queue desc");
529 529 goto fail_undo_reply_queue;
530 530 }
531 531
532 532 bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
533 533 instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
534 534
535 535 /* virtual address of request queue desc */
536 536 instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
537 537 (instance->request_desc_dma_obj.buffer);
538 538
539 539 instance->request_message_pool_phy =
540 540 (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
541 541
542 542 return (DDI_SUCCESS);
543 543
544 544 fail_undo_reply_queue:
545 545 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
546 546 (void) mrsas_free_dma_obj(instance,
547 547 instance->reply_desc_dma_obj);
548 548 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
549 549 }
550 550
551 551 return (DDI_FAILURE);
552 552 }
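
A quick worked instance of the sizing arithmetic above; the max_fw_cmds figure is an example only (mrsas_init_adapter_tbolt() later in this file caps it at 1007):

	int max_fw_cmds    = 1007;                               /* example */
	int max_reply_q_sz = ((max_fw_cmds + 1 + 15) / 16) * 16; /* = 1008 */
	uint32_t reply_q_sz   = 8 * max_reply_q_sz;              /* = 8064 */
	uint32_t request_q_sz = 8 * max_fw_cmds;                 /* = 8056 */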
553 553
554 554 /*
555 555 * mrsas_alloc_cmd_pool_tbolt
556 556 *
557 557 * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
558 558 * routine
559 559 */
560 560 int
561 561 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
562 562 {
563 563 int i;
564 564 int count;
565 565 uint32_t max_cmd;
566 566 uint32_t reserve_cmd;
567 567 size_t sz;
568 568
569 569 struct mrsas_cmd *cmd;
570 570
571 571 max_cmd = instance->max_fw_cmds;
572 572 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
573 573 "max_cmd %x", max_cmd));
574 574
575 575
576 576 sz = sizeof (struct mrsas_cmd *) * max_cmd;
577 577
578 578 /*
579 579 * instance->cmd_list is an array of struct mrsas_cmd pointers.
580 580 * Allocate the dynamic array first and then allocate individual
581 581 * commands.
582 582 */
583 583 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
584 584
585 585 /* create a frame pool and assign one frame to each cmd */
586 586 for (count = 0; count < max_cmd; count++) {
587 587 instance->cmd_list[count] =
588 588 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
589 589 }
590 590
591 591 /* add all the commands to command pool */
592 592
593 593 INIT_LIST_HEAD(&instance->cmd_pool_list);
594 594 INIT_LIST_HEAD(&instance->cmd_pend_list);
595 595 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
596 596
597 597 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
598 598
599 599 	/* cmd index 0 reserved for IOC INIT */
600 600 for (i = 1; i < reserve_cmd; i++) {
601 601 cmd = instance->cmd_list[i];
602 602 cmd->index = i;
603 603 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
604 604 }
605 605
606 606
607 607 for (i = reserve_cmd; i < max_cmd; i++) {
608 608 cmd = instance->cmd_list[i];
609 609 cmd->index = i;
610 610 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
611 611 }
612 612
613 613 return (DDI_SUCCESS);
614 614
615 615 mrsas_undo_cmds:
616 616 if (count > 0) {
617 617 /* free each cmd */
618 618 for (i = 0; i < count; i++) {
619 619 if (instance->cmd_list[i] != NULL) {
620 620 kmem_free(instance->cmd_list[i],
621 621 sizeof (struct mrsas_cmd));
622 622 }
623 623 instance->cmd_list[i] = NULL;
624 624 }
625 625 }
626 626
627 627 mrsas_undo_cmd_list:
628 628 if (instance->cmd_list != NULL)
629 629 kmem_free(instance->cmd_list, sz);
630 630 instance->cmd_list = NULL;
631 631
632 632 return (DDI_FAILURE);
633 633 }
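
For reference, the two loops above leave cmd_list[] partitioned as sketched below; MRSAS_APP_RESERVED_CMDS is a driver constant whose value is not visible in this hunk, so 32 is used purely for illustration:

	/*
	 * cmd_list[0]                     held back for IOC INIT
	 * cmd_list[1 .. RESERVED - 1]     cmd_app_pool_list  (e.g. 1..31)
	 * cmd_list[RESERVED .. max-1]     cmd_pool_list      (e.g. 32..max-1)
	 */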
634 634
635 635
636 636 /*
637 637 * free_space_for_mpi2
638 638 */
639 639 void
640 640 free_space_for_mpi2(struct mrsas_instance *instance)
641 641 {
642 642 /* already freed */
643 643 if (instance->cmd_list == NULL) {
644 644 return;
645 645 }
646 646
647 647 /* First free the additional DMA buffer */
648 648 mrsas_tbolt_free_additional_dma_buffer(instance);
649 649
650 650 /* Free the request/reply descriptor pool */
651 651 free_req_rep_desc_pool(instance);
652 652
653 653 /* Free the MPI message pool */
654 654 destroy_mpi2_frame_pool(instance);
655 655
656 656 /* Free the MFI frame pool */
657 657 destroy_mfi_frame_pool(instance);
658 658
659 659 /* Free all the commands in the cmd_list */
660 660 /* Free the cmd_list buffer itself */
661 661 mrsas_free_cmd_pool(instance);
662 662 }
663 663
664 664
665 665 /*
666 666 * ThunderBolt(TB) memory allocations for commands/messages/frames.
667 667 */
668 668 int
669 669 alloc_space_for_mpi2(struct mrsas_instance *instance)
670 670 {
671 671 /* Allocate command pool (memory for cmd_list & individual commands) */
672 672 if (mrsas_alloc_cmd_pool_tbolt(instance)) {
673 673 dev_err(instance->dip, CE_WARN, "Error creating cmd pool");
674 674 return (DDI_FAILURE);
675 675 }
676 676
677 677 /* Initialize single reply size and Message size */
678 678 instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
679 679 instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
680 680
681 681 instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
682 682 (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
683 683 sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
684 684 instance->max_sge_in_chain = (MR_COMMAND_SIZE -
685 685 MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
686 686
687 687 /* Reduce SG count by 1 to take care of group cmds feature in FW */
688 688 instance->max_num_sge = (instance->max_sge_in_main_msg +
689 689 instance->max_sge_in_chain - 2);
690 690 instance->chain_offset_mpt_msg =
691 691 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
692 692 instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
693 693 sizeof (MPI2_SGE_IO_UNION)) / 16;
694 694 instance->reply_read_index = 0;
695 695
696 696
697 697 /* Allocate Request and Reply descriptors Array */
698 698 /* Make sure the buffer is aligned to 8 for req/rep descriptor Pool */
699 699 if (alloc_req_rep_desc(instance)) {
700 700 dev_err(instance->dip, CE_WARN,
701 701 		    "Error, allocating memory for descriptor-pool");
702 702 goto mpi2_undo_cmd_pool;
703 703 }
704 704 con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
705 705 instance->request_message_pool_phy));
706 706
707 707
708 708 /* Allocate MFI Frame pool - for MPI-MFI passthru commands */
709 709 if (create_mfi_frame_pool(instance)) {
710 710 dev_err(instance->dip, CE_WARN,
711 711 "Error, allocating memory for MFI frame-pool");
712 712 		goto mpi2_undo_descriptor_pool;
713 713 }
714 714
715 715
716 716 /* Allocate MPI2 Message pool */
717 717 /*
718 718 	 * Make sure the buffer is aligned to 256 for the raid message packet.
719 719 	 * Create an io request pool and assign one frame to each cmd.
720 720 */
721 721
722 722 if (create_mpi2_frame_pool(instance)) {
723 723 dev_err(instance->dip, CE_WARN,
724 724 "Error, allocating memory for MPI2 Message-pool");
725 725 goto mpi2_undo_mfi_frame_pool;
726 726 }
727 727
728 728 #ifdef DEBUG
729 729 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
730 730 instance->max_sge_in_main_msg));
731 731 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
732 732 instance->max_sge_in_chain));
733 733 con_log(CL_ANN1, (CE_CONT,
734 734 "[max_sge]0x%x", instance->max_num_sge));
735 735 con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
736 736 instance->chain_offset_mpt_msg));
737 737 con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
738 738 instance->chain_offset_io_req));
739 739 #endif
740 740
741 741
742 742 /* Allocate additional dma buffer */
743 743 if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
744 744 dev_err(instance->dip, CE_WARN,
745 745 "Error, allocating tbolt additional DMA buffer");
746 746 goto mpi2_undo_message_pool;
747 747 }
748 748
749 749 return (DDI_SUCCESS);
750 750
751 751 mpi2_undo_message_pool:
752 752 destroy_mpi2_frame_pool(instance);
753 753
754 754 mpi2_undo_mfi_frame_pool:
755 755 destroy_mfi_frame_pool(instance);
756 756
757 757 mpi2_undo_descriptor_pool:
758 758 free_req_rep_desc_pool(instance);
759 759
760 760 mpi2_undo_cmd_pool:
761 761 mrsas_free_cmd_pool(instance);
762 762
763 763 return (DDI_FAILURE);
764 764 }
765 765
766 766
767 767 /*
768 768 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
769 769 */
770 770 int
771 771 mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
772 772 {
773 773
774 774 /*
775 775 * Reduce the max supported cmds by 1. This is to ensure that the
776 776 	 * reply_q_sz (1 more than the max cmds that the driver may send)
777 777 	 * does not exceed the max cmds that the FW can support.
778 778 */
779 779
780 780 if (instance->max_fw_cmds > 1008) {
781 781 instance->max_fw_cmds = 1008;
782 782 instance->max_fw_cmds = instance->max_fw_cmds-1;
783 783 }
784 784
785 785 con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
786 786 "instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
787 787
788 788
789 789 /* create a pool of commands */
790 790 if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
791 791 dev_err(instance->dip, CE_WARN,
792 792 "alloc_space_for_mpi2() failed.");
793 793
794 794 return (DDI_FAILURE);
795 795 }
796 796
797 797 /* Send ioc init message */
798 798 /* NOTE: the issue_init call does FMA checking already. */
799 799 if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
800 800 dev_err(instance->dip, CE_WARN,
801 801 "mrsas_issue_init_mpi2() failed.");
802 802
803 803 goto fail_init_fusion;
804 804 }
805 805
806 806 instance->unroll.alloc_space_mpi2 = 1;
807 807
808 808 con_log(CL_ANN, (CE_NOTE,
809 809 "mrsas_init_adapter_tbolt: SUCCESSFUL"));
810 810
811 811 return (DDI_SUCCESS);
812 812
813 813 fail_init_fusion:
814 814 free_space_for_mpi2(instance);
815 815
816 816 return (DDI_FAILURE);
817 817 }
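
Worked instance of the cap above (figures illustrative): if the firmware reported, say, 1024 supported commands, max_fw_cmds becomes 1008 - 1 = 1007, and the reply queue depth computed in alloc_req_rep_desc() rounds 1007 + 1 up to 1008, staying within what the firmware advertised.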
818 818
819 819
820 820
821 821 /*
822 822 * init_mpi2
823 823 */
824 824 int
825 825 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
826 826 {
827 827 dma_obj_t init2_dma_obj;
828 828 int ret_val = DDI_SUCCESS;
829 829
830 830 /* allocate DMA buffer for IOC INIT message */
831 831 init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
832 832 init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
833 833 init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
834 834 init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
835 835 init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
836 836 init2_dma_obj.dma_attr.dma_attr_align = 256;
837 837
838 838 if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
839 839 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
840 840 		dev_err(instance->dip, CE_WARN, "mrsas_issue_init_mpi2: "
841 841 "could not allocate data transfer buffer.");
842 842 return (DDI_FAILURE);
843 843 }
844 844 (void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
845 845
846 846 con_log(CL_ANN1, (CE_NOTE,
847 847 	    "mrsas_issue_init_mpi2: phys addr %x",
848 848 init2_dma_obj.dma_cookie[0].dmac_address));
849 849
850 850
851 851 /* Initialize and send ioc init message */
852 852 ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
853 853 if (ret_val == DDI_FAILURE) {
854 854 con_log(CL_ANN1, (CE_WARN,
855 855 "mrsas_issue_init_mpi2: Failed"));
856 856 goto fail_init_mpi2;
857 857 }
858 858
859 859 /* free IOC init DMA buffer */
860 860 if (mrsas_free_dma_obj(instance, init2_dma_obj)
861 861 != DDI_SUCCESS) {
862 862 con_log(CL_ANN1, (CE_WARN,
863 863 "mrsas_issue_init_mpi2: Free Failed"));
864 864 return (DDI_FAILURE);
865 865 }
866 866
867 867 /* Get/Check and sync ld_map info */
868 868 instance->map_id = 0;
869 869 if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
870 870 (void) mrsas_tbolt_sync_map_info(instance);
871 871
872 872
873 873 /* No mrsas_cmd to send, so send NULL. */
874 874 if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
875 875 goto fail_init_mpi2;
876 876
877 877 con_log(CL_ANN, (CE_NOTE,
878 878 "mrsas_issue_init_mpi2: SUCCESSFUL"));
879 879
880 880 return (DDI_SUCCESS);
881 881
882 882 fail_init_mpi2:
883 883 (void) mrsas_free_dma_obj(instance, init2_dma_obj);
884 884
885 885 return (DDI_FAILURE);
886 886 }
887 887
888 888 static int
889 889 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
890 890 {
891 891 int numbytes;
892 892 uint16_t flags;
893 893 struct mrsas_init_frame2 *mfiFrameInit2;
894 894 struct mrsas_header *frame_hdr;
895 895 Mpi2IOCInitRequest_t *init;
896 896 struct mrsas_cmd *cmd = NULL;
897 897 struct mrsas_drv_ver drv_ver_info;
898 898 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
899 899 uint32_t timeout;
900 900
901 901 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
902 902
903 903
904 904 #ifdef DEBUG
905 905 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
906 906 (int)sizeof (*mfiFrameInit2)));
907 907 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
908 908 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
909 909 (int)sizeof (struct mrsas_init_frame2)));
910 910 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
911 911 (int)sizeof (Mpi2IOCInitRequest_t)));
912 912 #endif
913 913
914 914 init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
915 915 numbytes = sizeof (*init);
916 916 bzero(init, numbytes);
917 917
918 918 ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
919 919 MPI2_FUNCTION_IOC_INIT);
920 920
921 921 ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
922 922 MPI2_WHOINIT_HOST_DRIVER);
923 923
924 924 /* set MsgVersion and HeaderVersion host driver was built with */
925 925 ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
926 926 MPI2_VERSION);
927 927
928 928 ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
929 929 MPI2_HEADER_VERSION);
930 930
931 931 ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
932 932 instance->raid_io_msg_size / 4);
933 933
934 934 ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
935 935 0);
936 936
937 937 ddi_put16(mpi2_dma_obj->acc_handle,
938 938 &init->ReplyDescriptorPostQueueDepth,
939 939 instance->reply_q_depth);
940 940 /*
941 941 * These addresses are set using the DMA cookie addresses from when the
942 942 * memory was allocated. Sense buffer hi address should be 0.
943 943 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
944 944 */
945 945
946 946 ddi_put32(mpi2_dma_obj->acc_handle,
947 947 &init->SenseBufferAddressHigh, 0);
948 948
949 949 ddi_put64(mpi2_dma_obj->acc_handle,
950 950 (uint64_t *)&init->SystemRequestFrameBaseAddress,
951 951 instance->io_request_frames_phy);
952 952
953 953 ddi_put64(mpi2_dma_obj->acc_handle,
954 954 &init->ReplyDescriptorPostQueueAddress,
955 955 instance->reply_frame_pool_phy);
956 956
957 957 ddi_put64(mpi2_dma_obj->acc_handle,
958 958 &init->ReplyFreeQueueAddress, 0);
959 959
960 960 cmd = instance->cmd_list[0];
961 961 if (cmd == NULL) {
962 962 return (DDI_FAILURE);
963 963 }
964 964 cmd->retry_count_for_ocr = 0;
965 965 cmd->pkt = NULL;
966 966 cmd->drv_pkt_time = 0;
967 967
968 968 mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
969 969 con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
970 970
971 971 frame_hdr = &cmd->frame->hdr;
972 972
973 973 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
974 974 MFI_CMD_STATUS_POLL_MODE);
975 975
976 976 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
977 977
978 978 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
979 979
980 980 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
981 981
982 982 con_log(CL_ANN, (CE_CONT,
983 983 "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
984 984
985 985 /* Init the MFI Header */
986 986 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
987 987 &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
988 988
989 989 con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
990 990
991 991 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
992 992 &mfiFrameInit2->cmd_status,
993 993 MFI_STAT_INVALID_STATUS);
994 994
995 995 con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
996 996
997 997 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
998 998 &mfiFrameInit2->queue_info_new_phys_addr_lo,
999 999 mpi2_dma_obj->dma_cookie[0].dmac_address);
1000 1000
1001 1001 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1002 1002 &mfiFrameInit2->data_xfer_len,
1003 1003 sizeof (Mpi2IOCInitRequest_t));
1004 1004
1005 1005 con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1006 1006 (int)init->ReplyDescriptorPostQueueAddress));
1007 1007
1008 1008 /* fill driver version information */
1009 1009 fill_up_drv_ver(&drv_ver_info);
1010 1010
1011 1011 /* allocate the driver version data transfer buffer */
1012 1012 instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
1013 1013 instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1014 1014 instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1015 1015 instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1016 1016 instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1017 1017 instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1018 1018
1019 1019 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1020 1020 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1021 1021 dev_err(instance->dip, CE_WARN,
1022 1022 "fusion init: Could not allocate driver version buffer.");
1023 1023 return (DDI_FAILURE);
1024 1024 }
1025 1025 /* copy driver version to dma buffer */
1026 1026 bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
1027 1027 ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1028 1028 (uint8_t *)drv_ver_info.drv_ver,
1029 1029 (uint8_t *)instance->drv_ver_dma_obj.buffer,
1030 1030 sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1031 1031
1032 1032 /* send driver version physical address to firmware */
1033 1033 ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
1034 1034 instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1035 1035
1036 1036 con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1037 1037 mfiFrameInit2->queue_info_new_phys_addr_lo,
1038 1038 (int)sizeof (Mpi2IOCInitRequest_t)));
1039 1039
1040 1040 con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1041 1041
1042 1042 con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1043 1043 cmd->scsi_io_request_phys_addr,
1044 1044 (int)sizeof (struct mrsas_init_frame2)));
1045 1045
1046 1046 /* disable interrupts before sending INIT2 frame */
1047 1047 instance->func_ptr->disable_intr(instance);
1048 1048
1049 1049 req_desc.Words = cmd->scsi_io_request_phys_addr;
1050 1050 req_desc.MFAIo.RequestFlags =
1051 1051 (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1052 1052
1053 1053 cmd->request_desc = &req_desc;
1054 1054
1055 1055 /* issue the init frame */
1056 1056
1057 1057 mutex_enter(&instance->reg_write_mtx);
1058 1058 WR_IB_LOW_QPORT((uint32_t)(req_desc.Words), instance);
1059 1059 WR_IB_HIGH_QPORT((uint32_t)(req_desc.Words >> 32), instance);
1060 1060 mutex_exit(&instance->reg_write_mtx);
1061 1061
1062 1062 con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1063 1063 con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
1064 1064 frame_hdr->cmd_status));
1065 1065
1066 - timeout = drv_usectohz(MFI_POLL_TIMEOUT_SECS * MICROSEC);
1066 + timeout = drv_sectohz(MFI_POLL_TIMEOUT_SECS);
1067 1067 do {
1068 1068 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
1069 1069 &mfiFrameInit2->cmd_status) != MFI_CMD_STATUS_POLL_MODE)
1070 1070 break;
1071 1071 delay(1);
1072 1072 timeout--;
1073 1073 } while (timeout > 0);
1074 1074
1075 1075 if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1076 1076 &mfiFrameInit2->cmd_status) == 0) {
1077 1077 con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1078 1078 } else {
1079 1079 con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1080 1080 mrsas_dump_reply_desc(instance);
1081 1081 goto fail_ioc_init;
1082 1082 }
1083 1083
1084 1084 mrsas_dump_reply_desc(instance);
1085 1085
1086 1086 instance->unroll.verBuff = 1;
1087 1087
1088 1088 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));
1089 1089
1090 1090 return (DDI_SUCCESS);
1091 1091
1092 1092
1093 1093 fail_ioc_init:
1094 1094
1095 1095 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1096 1096
1097 1097 return (DDI_FAILURE);
1098 1098 }
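
The drv_sectohz() call in the hunk above is the point of this webrev: it replaces the open-coded drv_usectohz(seconds * MICROSEC) idiom. A minimal sketch of how such a wrapper could be defined (an assumption for illustration only; the authoritative definition belongs to the DDI headers touched elsewhere in this change, not to this file):

	/*
	 * Hypothetical sketch: convert seconds to clock ticks by funneling
	 * through the existing DDI routine drv_usectohz().  Centralizing the
	 * seconds-to-microseconds multiply keeps callers such as the polling
	 * loop above readable.
	 */
	#define	drv_sectohz(s)	(drv_usectohz((clock_t)(s) * MICROSEC))

	/* Usage, as on the new line of the hunk: */
	timeout = drv_sectohz(MFI_POLL_TIMEOUT_SECS);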
1099 1099
1100 1100 int
1101 1101 wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1102 1102 {
1103 1103 int i;
1104 1104 uint32_t wait_time = dump_io_wait_time;
1105 1105 for (i = 0; i < wait_time; i++) {
1106 1106 /*
1107 1107 		 * Check for outstanding polled commands,
1108 1108 		 * except the ldsync command and the aen command.
1109 1109 */
1110 1110 if (instance->fw_outstanding <= 2) {
1111 1111 break;
1112 1112 }
1113 1113 drv_usecwait(10*MILLISEC);
1114 1114 /* complete commands from reply queue */
1115 1115 (void) mr_sas_tbolt_process_outstanding_cmd(instance);
1116 1116 }
1117 1117 if (instance->fw_outstanding > 2) {
1118 1118 return (1);
1119 1119 }
1120 1120 return (0);
1121 1121 }
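
Rough worked bound for the loop above, counting only the busy-waits: with the default dump_io_wait_time of 90, each pass spins for drv_usecwait(10 * MILLISEC) = 10 ms, so the driver polls for at least 90 x 10 ms = 0.9 s (plus whatever time each reply-queue sweep takes) before reporting outstanding I/O.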
1122 1122 /*
1123 1123 * scsi_pkt handling
1124 1124 *
1125 1125 * Visible to the external world via the transport structure.
1126 1126 */
1127 1127
1128 1128 int
1129 1129 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1130 1130 {
1131 1131 struct mrsas_instance *instance = ADDR2MR(ap);
1132 1132 struct scsa_cmd *acmd = PKT2CMD(pkt);
1133 1133 struct mrsas_cmd *cmd = NULL;
1134 1134 uchar_t cmd_done = 0;
1135 1135
1136 1136 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1137 1137 if (instance->deadadapter == 1) {
1138 1138 dev_err(instance->dip, CE_WARN,
1139 1139 		    "mrsas_tran_start: TBOLT returning TRAN_FATAL_ERROR "
1140 1140 		    "for IO, as the HBA doesn't take any more IOs");
1141 1141 if (pkt) {
1142 1142 pkt->pkt_reason = CMD_DEV_GONE;
1143 1143 pkt->pkt_statistics = STAT_DISCON;
1144 1144 }
1145 1145 return (TRAN_FATAL_ERROR);
1146 1146 }
1147 1147 if (instance->adapterresetinprogress) {
1148 1148 con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1149 1149 "returning mfi_pkt and setting TRAN_BUSY\n"));
1150 1150 return (TRAN_BUSY);
1151 1151 }
1152 1152 (void) mrsas_tbolt_prepare_pkt(acmd);
1153 1153
1154 1154 cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1155 1155
1156 1156 /*
1157 1157 	 * Check if the command was already completed by the mrsas_build_cmd()
1158 1158 	 * routine, in which case the busy_flag is clear, scb is NULL, and an
1159 1159 	 * appropriate reason is provided in the pkt_reason field.
1160 1160 */
1161 1161 if (cmd_done) {
1162 1162 pkt->pkt_reason = CMD_CMPLT;
1163 1163 pkt->pkt_scbp[0] = STATUS_GOOD;
1164 1164 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1165 1165 | STATE_SENT_CMD;
1166 1166 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1167 1167 (*pkt->pkt_comp)(pkt);
1168 1168 }
1169 1169
1170 1170 return (TRAN_ACCEPT);
1171 1171 }
1172 1172
1173 1173 if (cmd == NULL) {
1174 1174 return (TRAN_BUSY);
1175 1175 }
1176 1176
1177 1177
1178 1178 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1179 1179 if (instance->fw_outstanding > instance->max_fw_cmds) {
1180 1180 dev_err(instance->dip, CE_WARN,
1181 1181 "Command Queue Full... Returning BUSY");
1182 1182 DTRACE_PROBE2(tbolt_start_tran_err,
1183 1183 uint16_t, instance->fw_outstanding,
1184 1184 uint16_t, instance->max_fw_cmds);
1185 1185 return_raid_msg_pkt(instance, cmd);
1186 1186 return (TRAN_BUSY);
1187 1187 }
1188 1188
1189 1189 /* Synchronize the Cmd frame for the controller */
1190 1190 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1191 1191 DDI_DMA_SYNC_FORDEV);
1192 1192
1193 1193 con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1194 1194 "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
1195 1195 cmd->index, cmd->SMID));
1196 1196
1197 1197 instance->func_ptr->issue_cmd(cmd, instance);
1198 1198 } else {
1199 1199 instance->func_ptr->issue_cmd(cmd, instance);
1200 1200 (void) wait_for_outstanding_poll_io(instance);
1201 1201 (void) mrsas_common_check(instance, cmd);
1202 1202 DTRACE_PROBE2(tbolt_start_nointr_done,
1203 1203 uint8_t, cmd->frame->hdr.cmd,
1204 1204 uint8_t, cmd->frame->hdr.cmd_status);
1205 1205 }
1206 1206
1207 1207 return (TRAN_ACCEPT);
1208 1208 }
1209 1209
1210 1210 /*
1211 1211 * prepare the pkt:
1212 1212 * the pkt may have been resubmitted or just reused so
1213 1213 * initialize some fields and do some checks.
1214 1214 */
1215 1215 static int
1216 1216 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1217 1217 {
1218 1218 struct scsi_pkt *pkt = CMD2PKT(acmd);
1219 1219
1220 1220
1221 1221 /*
1222 1222 * Reinitialize some fields that need it; the packet may
1223 1223 * have been resubmitted
1224 1224 */
1225 1225 pkt->pkt_reason = CMD_CMPLT;
1226 1226 pkt->pkt_state = 0;
1227 1227 pkt->pkt_statistics = 0;
1228 1228 pkt->pkt_resid = 0;
1229 1229
1230 1230 /*
1231 1231 * zero status byte.
1232 1232 */
1233 1233 *(pkt->pkt_scbp) = 0;
1234 1234
1235 1235 return (0);
1236 1236 }
1237 1237
1238 1238
1239 1239 int
1240 1240 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1241 1241 struct scsa_cmd *acmd,
1242 1242 struct mrsas_cmd *cmd,
1243 1243 Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1244 1244 uint32_t *datalen)
1245 1245 {
1246 1246 uint32_t MaxSGEs;
1247 1247 int sg_to_process;
1248 1248 uint32_t i, j;
1249 1249 uint32_t numElements, endElement;
1250 1250 Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL;
1251 1251 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL;
1252 1252 ddi_acc_handle_t acc_handle =
1253 1253 instance->mpi2_frame_pool_dma_obj.acc_handle;
1254 1254
1255 1255 con_log(CL_ANN1, (CE_NOTE,
1256 1256 "chkpnt: Building Chained SGL :%d", __LINE__));
1257 1257
1258 1258 	/* Calculate SGE size in number of Words (32-bit) */
1259 1259 /* Clear the datalen before updating it. */
1260 1260 *datalen = 0;
1261 1261
1262 1262 MaxSGEs = instance->max_sge_in_main_msg;
1263 1263
1264 1264 ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1265 1265 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1266 1266
1267 1267 /* set data transfer flag. */
1268 1268 if (acmd->cmd_flags & CFLAG_DMASEND) {
1269 1269 ddi_put32(acc_handle, &scsi_raid_io->Control,
1270 1270 MPI2_SCSIIO_CONTROL_WRITE);
1271 1271 } else {
1272 1272 ddi_put32(acc_handle, &scsi_raid_io->Control,
1273 1273 MPI2_SCSIIO_CONTROL_READ);
1274 1274 }
1275 1275
1276 1276
1277 1277 numElements = acmd->cmd_cookiecnt;
1278 1278
1279 1279 con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1280 1280
1281 1281 if (numElements > instance->max_num_sge) {
1282 1282 con_log(CL_ANN, (CE_NOTE,
1283 1283 "[Max SGE Count Exceeded]:%x", numElements));
1284 1284 return (numElements);
1285 1285 }
1286 1286
1287 1287 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1288 1288 (uint8_t)numElements);
1289 1289
1290 1290 /* set end element in main message frame */
1291 1291 endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
1292 1292
1293 1293 /* prepare the scatter-gather list for the firmware */
1294 1294 scsi_raid_io_sgl_ieee =
1295 1295 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1296 1296
1297 1297 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1298 1298 Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1299 1299 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1300 1300
1301 1301 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1302 1302 }
1303 1303
1304 1304 for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1305 1305 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1306 1306 acmd->cmd_dmacookies[i].dmac_laddress);
1307 1307
1308 1308 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1309 1309 acmd->cmd_dmacookies[i].dmac_size);
1310 1310
1311 1311 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1312 1312
1313 1313 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1314 1314 if (i == (numElements - 1)) {
1315 1315 ddi_put8(acc_handle,
1316 1316 &scsi_raid_io_sgl_ieee->Flags,
1317 1317 IEEE_SGE_FLAGS_END_OF_LIST);
1318 1318 }
1319 1319 }
1320 1320
1321 1321 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1322 1322
1323 1323 #ifdef DEBUG
1324 1324 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1325 1325 scsi_raid_io_sgl_ieee->Address));
1326 1326 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1327 1327 scsi_raid_io_sgl_ieee->Length));
1328 1328 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1329 1329 scsi_raid_io_sgl_ieee->Flags));
1330 1330 #endif
1331 1331
1332 1332 }
1333 1333
1334 1334 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
1335 1335
1336 1336 /* check if chained SGL required */
1337 1337 if (i < numElements) {
1338 1338
1339 1339 con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1340 1340
1341 1341 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1342 1342 uint16_t ioFlags =
1343 1343 ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1344 1344
1345 1345 if ((ioFlags &
1346 1346 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1347 1347 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1348 1348 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1349 1349 (U8)instance->chain_offset_io_req);
1350 1350 } else {
1351 1351 ddi_put8(acc_handle,
1352 1352 &scsi_raid_io->ChainOffset, 0);
1353 1353 }
1354 1354 } else {
1355 1355 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1356 1356 (U8)instance->chain_offset_io_req);
1357 1357 }
1358 1358
1359 1359 /* prepare physical chain element */
1360 1360 ieeeChainElement = scsi_raid_io_sgl_ieee;
1361 1361
1362 1362 ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1363 1363
1364 1364 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1365 1365 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1366 1366 IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1367 1367 } else {
1368 1368 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1369 1369 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1370 1370 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1371 1371 }
1372 1372
1373 1373 ddi_put32(acc_handle, &ieeeChainElement->Length,
1374 1374 (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1375 1375
1376 1376 ddi_put64(acc_handle, &ieeeChainElement->Address,
1377 1377 (U64)cmd->sgl_phys_addr);
1378 1378
1379 1379 sg_to_process = numElements - i;
1380 1380
1381 1381 con_log(CL_ANN1, (CE_NOTE,
1382 1382 "[Additional SGE Count]:%x", endElement));
1383 1383
1384 1384 /* point to the chained SGL buffer */
1385 1385 scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1386 1386
1387 1387 /* build rest of the SGL in chained buffer */
1388 1388 for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1389 1389 con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1390 1390
1391 1391 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1392 1392 acmd->cmd_dmacookies[i].dmac_laddress);
1393 1393
1394 1394 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1395 1395 acmd->cmd_dmacookies[i].dmac_size);
1396 1396
1397 1397 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1398 1398
1399 1399 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1400 1400 if (i == (numElements - 1)) {
1401 1401 ddi_put8(acc_handle,
1402 1402 &scsi_raid_io_sgl_ieee->Flags,
1403 1403 IEEE_SGE_FLAGS_END_OF_LIST);
1404 1404 }
1405 1405 }
1406 1406
1407 1407 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1408 1408
1409 1409 #ifdef DEBUG
1410 1410 con_log(CL_DLEVEL1, (CE_NOTE,
1411 1411 "[SGL Address]: %" PRIx64,
1412 1412 scsi_raid_io_sgl_ieee->Address));
1413 1413 con_log(CL_DLEVEL1, (CE_NOTE,
1414 1414 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1415 1415 con_log(CL_DLEVEL1, (CE_NOTE,
1416 1416 "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1417 1417 #endif
1418 1418
1419 1419 i++;
1420 1420 }
1421 1421 }
1422 1422
1423 1423 return (0);
1424 1424 } /* end of BuildScatterGather */
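
A worked pass through the chaining logic above, with assumed numbers (max_sge_in_main_msg = 8 is illustrative only):

	/*
	 * numElements = 12 cookies, MaxSGEs = 8:
	 *   endElement    = MaxSGEs - 1 = 7  -> cookies 0..6 go in the main
	 *                                       frame's SGL slots
	 *   slot 7                           -> becomes the chain element,
	 *                                       Address = cmd->sgl_phys_addr
	 *   sg_to_process = 12 - 7 = 5       -> cookies 7..11 are written to
	 *                                       the chained buffer at cmd->sgl
	 */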
1425 1425
1426 1426
1427 1427 /*
1428 1428 * build_cmd
1429 1429 */
1430 1430 static struct mrsas_cmd *
1431 1431 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1432 1432 struct scsi_pkt *pkt, uchar_t *cmd_done)
1433 1433 {
1434 1434 uint8_t fp_possible = 0;
1435 1435 uint32_t index;
1436 1436 uint32_t lba_count = 0;
1437 1437 uint32_t start_lba_hi = 0;
1438 1438 uint32_t start_lba_lo = 0;
1439 1439 ddi_acc_handle_t acc_handle =
1440 1440 instance->mpi2_frame_pool_dma_obj.acc_handle;
1441 1441 struct mrsas_cmd *cmd = NULL;
1442 1442 struct scsa_cmd *acmd = PKT2CMD(pkt);
1443 1443 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
1444 1444 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
1445 1445 uint32_t datalen;
1446 1446 struct IO_REQUEST_INFO io_info;
1447 1447 MR_FW_RAID_MAP_ALL *local_map_ptr;
1448 1448 uint16_t pd_cmd_cdblen;
1449 1449
1450 1450 con_log(CL_DLEVEL1, (CE_NOTE,
1451 1451 "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1452 1452
1453 1453 /* find out if this is logical or physical drive command. */
1454 1454 acmd->islogical = MRDRV_IS_LOGICAL(ap);
1455 1455 acmd->device_id = MAP_DEVICE_ID(instance, ap);
1456 1456
1457 1457 *cmd_done = 0;
1458 1458
1459 1459 /* get the command packet */
1460 1460 if (!(cmd = get_raid_msg_pkt(instance))) {
1461 1461 DTRACE_PROBE2(tbolt_build_cmd_mfi_err, uint16_t,
1462 1462 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
1463 1463 return (NULL);
1464 1464 }
1465 1465
1466 1466 index = cmd->index;
1467 1467 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
1468 1468 ReqDescUnion->Words = 0;
1469 1469 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1470 1470 ReqDescUnion->SCSIIO.RequestFlags =
1471 1471 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1472 1472 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1473 1473
1474 1474
1475 1475 cmd->request_desc = ReqDescUnion;
1476 1476 cmd->pkt = pkt;
1477 1477 cmd->cmd = acmd;
1478 1478
1479 1479 DTRACE_PROBE4(tbolt_build_cmd, uint8_t, pkt->pkt_cdbp[0],
1480 1480 ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len,
1481 1481 uint16_t, acmd->device_id);
1482 1482
1483 1483 /* lets get the command directions */
1484 1484 if (acmd->cmd_flags & CFLAG_DMASEND) {
1485 1485 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1486 1486 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1487 1487 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1488 1488 DDI_DMA_SYNC_FORDEV);
1489 1489 }
1490 1490 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1491 1491 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1492 1492 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1493 1493 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1494 1494 DDI_DMA_SYNC_FORCPU);
1495 1495 }
1496 1496 } else {
1497 1497 con_log(CL_ANN, (CE_NOTE, "NO DMA"));
1498 1498 }
1499 1499
1500 1500
1501 1501 /* get SCSI_IO raid message frame pointer */
1502 1502 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1503 1503
1504 1504 /* zero out SCSI_IO raid message frame */
1505 1505 bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
1506 1506
1507 1507 /* Set the ldTargetId set by BuildRaidContext() */
1508 1508 ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1509 1509 acmd->device_id);
1510 1510
1511 1511 /* Copy CDB to scsi_io_request message frame */
1512 1512 ddi_rep_put8(acc_handle,
1513 1513 (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1514 1514 acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1515 1515
1516 1516 /*
1517 1517 	 * Just the CDB length; the rest of the Flags are zero.
1518 1518 	 * This will be modified later.
1519 1519 */
1520 1520 ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1521 1521
1522 1522 pd_cmd_cdblen = acmd->cmd_cdblen;
1523 1523
1524 1524 if (acmd->islogical) {
1525 1525
1526 1526 switch (pkt->pkt_cdbp[0]) {
1527 1527 case SCMD_READ:
1528 1528 case SCMD_WRITE:
1529 1529 case SCMD_READ_G1:
1530 1530 case SCMD_WRITE_G1:
1531 1531 case SCMD_READ_G4:
1532 1532 case SCMD_WRITE_G4:
1533 1533 case SCMD_READ_G5:
1534 1534 case SCMD_WRITE_G5:
1535 1535
1536 1536 /* Initialize sense Information */
1537 1537 if (cmd->sense1 == NULL) {
1538 1538 con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1539 1539 "Sense buffer ptr NULL "));
1540 1540 }
1541 1541 bzero(cmd->sense1, SENSE_LENGTH);
1542 1542 con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1543 1543 "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1544 1544
1545 1545 if (acmd->cmd_cdblen == CDB_GROUP0) {
1546 1546 /* 6-byte cdb */
1547 1547 lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1548 1548 start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1549 1549 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1550 1550 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1551 1551 << 16));
1552 1552 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
1553 1553 /* 10-byte cdb */
1554 1554 lba_count =
1555 1555 (((uint16_t)(pkt->pkt_cdbp[8])) |
1556 1556 ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1557 1557
1558 1558 start_lba_lo =
1559 1559 (((uint32_t)(pkt->pkt_cdbp[5])) |
1560 1560 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1561 1561 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1562 1562 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1563 1563
1564 1564 } else if (acmd->cmd_cdblen == CDB_GROUP5) {
1565 1565 /* 12-byte cdb */
1566 1566 lba_count = (
1567 1567 ((uint32_t)(pkt->pkt_cdbp[9])) |
1568 1568 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1569 1569 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1570 1570 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1571 1571
1572 1572 start_lba_lo =
1573 1573 (((uint32_t)(pkt->pkt_cdbp[5])) |
1574 1574 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1575 1575 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1576 1576 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1577 1577
1578 1578 } else if (acmd->cmd_cdblen == CDB_GROUP4) {
1579 1579 /* 16-byte cdb */
1580 1580 lba_count = (
1581 1581 ((uint32_t)(pkt->pkt_cdbp[13])) |
1582 1582 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1583 1583 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1584 1584 ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1585 1585
1586 1586 start_lba_lo = (
1587 1587 ((uint32_t)(pkt->pkt_cdbp[9])) |
1588 1588 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1589 1589 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1590 1590 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1591 1591
1592 1592 start_lba_hi = (
1593 1593 ((uint32_t)(pkt->pkt_cdbp[5])) |
1594 1594 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1595 1595 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1596 1596 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1597 1597 }
1598 1598
1599 1599 if (instance->tbolt &&
1600 1600 ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1601 1601 				dev_err(instance->dip, CE_WARN,
1602 1602 				    "I/O sector count 0x%x exceeds the "
1603 1603 				    "controller transfer limit",
1604 1604 				    lba_count);
1605 1605 }
1606 1606
1607 1607 bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
1608 1608 io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1609 1609 start_lba_lo;
1610 1610 io_info.numBlocks = lba_count;
1611 1611 io_info.ldTgtId = acmd->device_id;
1612 1612
1613 1613 if (acmd->cmd_flags & CFLAG_DMASEND)
1614 1614 io_info.isRead = 0;
1615 1615 else
1616 1616 io_info.isRead = 1;
1617 1617
1618 1618
1619 1619 /* Acquire SYNC MAP UPDATE lock */
1620 1620 mutex_enter(&instance->sync_map_mtx);
1621 1621
1622 1622 local_map_ptr =
1623 1623 instance->ld_map[(instance->map_id & 1)];
1624 1624
1625 1625 if ((MR_TargetIdToLdGet(
1626 1626 acmd->device_id, local_map_ptr) >=
1627 1627 MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1628 1628 dev_err(instance->dip, CE_NOTE,
1629 1629 "Fast Path NOT Possible, "
1630 1630 "targetId >= MAX_LOGICAL_DRIVES || "
1631 1631 "!instance->fast_path_io");
1632 1632 fp_possible = 0;
1633 1633 /* Set Regionlock flags to BYPASS */
1634 1634 /* io_request->RaidContext.regLockFlags = 0; */
1635 1635 ddi_put8(acc_handle,
1636 1636 &scsi_raid_io->RaidContext.regLockFlags, 0);
1637 1637 } else {
1638 1638 if (MR_BuildRaidContext(instance, &io_info,
1639 1639 &scsi_raid_io->RaidContext, local_map_ptr))
1640 1640 fp_possible = io_info.fpOkForIo;
1641 1641 }
1642 1642
1643 1643 if (!enable_fp)
1644 1644 fp_possible = 0;
1645 1645
1646 1646 con_log(CL_ANN1, (CE_NOTE, "enable_fp %d "
1647 1647 "instance->fast_path_io %d fp_possible %d",
1648 1648 enable_fp, instance->fast_path_io, fp_possible));
1649 1649
1650 1650 if (fp_possible) {
1651 1651
1652 1652 /* Check for DIF enabled LD */
1653 1653 if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1654 1654 /* Prepare 32 Byte CDB for DIF capable Disk */
1655 1655 mrsas_tbolt_prepare_cdb(instance,
1656 1656 scsi_raid_io->CDB.CDB32,
1657 1657 &io_info, scsi_raid_io, start_lba_lo);
1658 1658 } else {
1659 1659 mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1660 1660 (uint8_t *)&pd_cmd_cdblen,
1661 1661 io_info.pdBlock, io_info.numBlocks);
1662 1662 ddi_put16(acc_handle,
1663 1663 &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1664 1664 }
1665 1665
1666 1666 ddi_put8(acc_handle, &scsi_raid_io->Function,
1667 1667 MPI2_FUNCTION_SCSI_IO_REQUEST);
1668 1668
1669 1669 ReqDescUnion->SCSIIO.RequestFlags =
1670 1670 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1671 1671 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1672 1672
1673 1673 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1674 1674 uint8_t regLockFlags = ddi_get8(acc_handle,
1675 1675 &scsi_raid_io->RaidContext.regLockFlags);
1676 1676 uint16_t IoFlags = ddi_get16(acc_handle,
1677 1677 &scsi_raid_io->IoFlags);
1678 1678
1679 1679 if (regLockFlags == REGION_TYPE_UNUSED)
1680 1680 ReqDescUnion->SCSIIO.RequestFlags =
1681 1681 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1682 1682 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1683 1683
1684 1684 IoFlags |=
1685 1685 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1686 1686 regLockFlags |=
1687 1687 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1688 1688 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1689 1689
1690 1690 ddi_put8(acc_handle,
1691 1691 &scsi_raid_io->ChainOffset, 0);
1692 1692 ddi_put8(acc_handle,
1693 1693 &scsi_raid_io->RaidContext.nsegType,
1694 1694 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1695 1695 MPI2_TYPE_CUDA));
1696 1696 ddi_put8(acc_handle,
1697 1697 &scsi_raid_io->RaidContext.regLockFlags,
1698 1698 regLockFlags);
1699 1699 ddi_put16(acc_handle,
1700 1700 &scsi_raid_io->IoFlags, IoFlags);
1701 1701 }
1702 1702
1703 1703 if ((instance->load_balance_info[
1704 1704 acmd->device_id].loadBalanceFlag) &&
1705 1705 (io_info.isRead)) {
1706 1706 io_info.devHandle =
1707 1707 get_updated_dev_handle(&instance->
1708 1708 load_balance_info[acmd->device_id],
1709 1709 &io_info);
1710 1710 cmd->load_balance_flag |=
1711 1711 MEGASAS_LOAD_BALANCE_FLAG;
1712 1712 } else {
1713 1713 cmd->load_balance_flag &=
1714 1714 ~MEGASAS_LOAD_BALANCE_FLAG;
1715 1715 }
1716 1716
1717 1717 ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1718 1718 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1719 1719 io_info.devHandle);
1720 1720
1721 1721 } else { /* FP Not Possible */
1722 1722
1723 1723 ddi_put8(acc_handle, &scsi_raid_io->Function,
1724 1724 MPI2_FUNCTION_LD_IO_REQUEST);
1725 1725
1726 1726 ddi_put16(acc_handle,
1727 1727 &scsi_raid_io->DevHandle, acmd->device_id);
1728 1728
1729 1729 ReqDescUnion->SCSIIO.RequestFlags =
1730 1730 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1731 1731 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1732 1732
1733 1733 ddi_put16(acc_handle,
1734 1734 &scsi_raid_io->RaidContext.timeoutValue,
1735 1735 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1736 1736
1737 1737 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1738 1738 uint8_t regLockFlags = ddi_get8(acc_handle,
1739 1739 &scsi_raid_io->RaidContext.regLockFlags);
1740 1740
1741 1741 if (regLockFlags == REGION_TYPE_UNUSED) {
1742 1742 ReqDescUnion->SCSIIO.RequestFlags =
1743 1743 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1744 1744 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1745 1745 }
1746 1746
1747 1747 regLockFlags |=
1748 1748 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1749 1749 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1750 1750
1751 1751 ddi_put8(acc_handle,
1752 1752 &scsi_raid_io->RaidContext.nsegType,
1753 1753 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1754 1754 MPI2_TYPE_CUDA));
1755 1755 ddi_put8(acc_handle,
1756 1756 &scsi_raid_io->RaidContext.regLockFlags,
1757 1757 regLockFlags);
1758 1758 }
1759 1759 } /* Not FP */
1760 1760
1761 1761 /* Release SYNC MAP UPDATE lock */
1762 1762 mutex_exit(&instance->sync_map_mtx);
1763 1763
1764 1764 break;
1765 1765
1766 1766 case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
1767 1767 return_raid_msg_pkt(instance, cmd);
1768 1768 *cmd_done = 1;
1769 1769 return (NULL);
1770 1770 }
1771 1771
1772 1772 case SCMD_MODE_SENSE:
1773 1773 case SCMD_MODE_SENSE_G1: {
1774 1774 union scsi_cdb *cdbp;
1775 1775 uint16_t page_code;
1776 1776
1777 1777 cdbp = (void *)pkt->pkt_cdbp;
1778 1778 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1779 1779 switch (page_code) {
1780 1780 case 0x3:
1781 1781 case 0x4:
1782 1782 (void) mrsas_mode_sense_build(pkt);
1783 1783 return_raid_msg_pkt(instance, cmd);
1784 1784 *cmd_done = 1;
1785 1785 return (NULL);
1786 1786 }
1787 1787 return (cmd);
1788 1788 }
1789 1789
1790 1790 default:
1791 1791 /* Pass-through command to logical drive */
1792 1792 ddi_put8(acc_handle, &scsi_raid_io->Function,
1793 1793 MPI2_FUNCTION_LD_IO_REQUEST);
1794 1794 ddi_put8(acc_handle, &scsi_raid_io->LUN[1], acmd->lun);
1795 1795 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1796 1796 acmd->device_id);
1797 1797 ReqDescUnion->SCSIIO.RequestFlags =
1798 1798 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1799 1799 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1800 1800 break;
1801 1801 }
1802 1802 } else { /* Physical */
1803 1803 #ifdef PDSUPPORT
1804 1804 /* Pass-through command to physical drive */
1805 1805
1806 1806 /* Acquire SYNC MAP UPDATE lock */
1807 1807 mutex_enter(&instance->sync_map_mtx);
1808 1808
1809 1809 local_map_ptr = instance->ld_map[instance->map_id & 1];
1810 1810
1811 1811 ddi_put8(acc_handle, &scsi_raid_io->Function,
1812 1812 MPI2_FUNCTION_SCSI_IO_REQUEST);
1813 1813
1814 1814 ReqDescUnion->SCSIIO.RequestFlags =
1815 1815 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1816 1816 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1817 1817
1818 1818 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1819 1819 local_map_ptr->raidMap.
1820 1820 devHndlInfo[acmd->device_id].curDevHdl);
1821 1821
1822 1822 		/* Set regLockFlags to REGION_TYPE_BYPASS */
1823 1823 ddi_put8(acc_handle,
1824 1824 &scsi_raid_io->RaidContext.regLockFlags, 0);
1825 1825 ddi_put64(acc_handle,
1826 1826 &scsi_raid_io->RaidContext.regLockRowLBA, 0);
1827 1827 ddi_put32(acc_handle,
1828 1828 &scsi_raid_io->RaidContext.regLockLength, 0);
1829 1829 ddi_put8(acc_handle,
1830 1830 &scsi_raid_io->RaidContext.RAIDFlags,
1831 1831 MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1832 1832 MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1833 1833 ddi_put16(acc_handle,
1834 1834 &scsi_raid_io->RaidContext.timeoutValue,
1835 1835 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1836 1836 ddi_put16(acc_handle,
1837 1837 &scsi_raid_io->RaidContext.ldTargetId,
1838 1838 acmd->device_id);
1839 1839 ddi_put8(acc_handle,
1840 1840 &scsi_raid_io->LUN[1], acmd->lun);
1841 1841
1842 1842 if (instance->fast_path_io &&
1843 1843 instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1844 1844 uint16_t IoFlags = ddi_get16(acc_handle,
1845 1845 &scsi_raid_io->IoFlags);
1846 1846 IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1847 1847 ddi_put16(acc_handle, &scsi_raid_io->IoFlags, IoFlags);
1848 1848 }
1849 1849 ddi_put16(acc_handle, &ReqDescUnion->SCSIIO.DevHandle,
1850 1850 local_map_ptr->raidMap.
1851 1851 devHndlInfo[acmd->device_id].curDevHdl);
1852 1852
1853 1853 /* Release SYNC MAP UPDATE lock */
1854 1854 mutex_exit(&instance->sync_map_mtx);
1855 1855 #else
1856 1856 /* If no PD support, return here. */
1857 1857 return (cmd);
1858 1858 #endif
1859 1859 }
1860 1860
1861 1861 /* Set sense buffer physical address/length in scsi_io_request. */
1862 1862 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1863 1863 cmd->sense_phys_addr1);
1864 1864 ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1865 1865
1866 1866 /* Construct SGL */
1867 1867 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1868 1868 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1869 1869
1870 1870 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1871 1871 scsi_raid_io, &datalen);
1872 1872
1873 1873 ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1874 1874
1875 1875 con_log(CL_ANN, (CE_CONT,
1876 1876 "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1877 1877 pkt->pkt_cdbp[0], acmd->device_id));
1878 1878 con_log(CL_DLEVEL1, (CE_CONT,
1879 1879 "data length = %x\n",
1880 1880 scsi_raid_io->DataLength));
1881 1881 con_log(CL_DLEVEL1, (CE_CONT,
1882 1882 "cdb length = %x\n",
1883 1883 acmd->cmd_cdblen));
1884 1884
1885 1885 return (cmd);
1886 1886 }
1887 1887
1888 1888 uint32_t
1889 1889 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1890 1890 {
1891 1891 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1892 1892 }
1893 1893
1894 1894 void
1895 1895 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1896 1896 {
1897 1897 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1898 1898 atomic_inc_16(&instance->fw_outstanding);
1899 1899
1900 1900 struct scsi_pkt *pkt;
1901 1901
1902 1902 con_log(CL_ANN1,
1903 1903 (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1904 1904
1905 1905 con_log(CL_DLEVEL1, (CE_CONT,
1906 1906 " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1907 1907 con_log(CL_DLEVEL1, (CE_CONT,
1908 1908 " [req desc low part] %x \n",
1909 1909 	    (uint_t)(req_desc->Words & 0xffffffff)));
1910 1910 con_log(CL_DLEVEL1, (CE_CONT,
1911 1911 " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1912 1912 pkt = cmd->pkt;
1913 1913
1914 1914 if (pkt) {
1915 1915 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1916 1916 "ISSUED CMD TO FW : called : cmd:"
1917 1917 ": %p instance : %p pkt : %p pkt_time : %x\n",
1918 1918 gethrtime(), (void *)cmd, (void *)instance,
1919 1919 (void *)pkt, cmd->drv_pkt_time));
1920 1920 if (instance->adapterresetinprogress) {
1921 1921 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1922 1922 con_log(CL_ANN, (CE_NOTE,
1923 1923 "TBOLT Reset the scsi_pkt timer"));
1924 1924 } else {
1925 1925 push_pending_mfi_pkt(instance, cmd);
1926 1926 }
1927 1927
1928 1928 } else {
1929 1929 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1930 1930 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
1931 1931 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
1932 1932 }
1933 1933
1934 1934 /* Issue the command to the FW */
1935 1935 mutex_enter(&instance->reg_write_mtx);
1936 1936 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1937 1937 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1938 1938 mutex_exit(&instance->reg_write_mtx);
1939 1939 }
1940 1940
1941 1941 /*
1942 1942 * issue_cmd_in_sync_mode
1943 1943 */
1944 1944 int
1945 1945 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
1946 1946 struct mrsas_cmd *cmd)
1947 1947 {
1948 1948 int i;
1949 1949 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
1950 1950 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1951 1951
1952 1952 struct mrsas_header *hdr;
1953 1953 hdr = (struct mrsas_header *)&cmd->frame->hdr;
1954 1954
1955 1955 con_log(CL_ANN,
1956 1956 (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
1957 1957 cmd->SMID));
1958 1958
1959 1959
1960 1960 if (instance->adapterresetinprogress) {
1961 1961 cmd->drv_pkt_time = ddi_get16
1962 1962 (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
1963 1963 if (cmd->drv_pkt_time < debug_timeout_g)
1964 1964 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
1965 1965 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
1966 1966 "RESET-IN-PROGRESS, issue cmd & return."));
1967 1967
1968 1968 mutex_enter(&instance->reg_write_mtx);
1969 1969 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1970 1970 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1971 1971 mutex_exit(&instance->reg_write_mtx);
1972 1972
1973 1973 return (DDI_SUCCESS);
1974 1974 } else {
1975 1975 con_log(CL_ANN1, (CE_NOTE,
1976 1976 "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
1977 1977 push_pending_mfi_pkt(instance, cmd);
1978 1978 }
1979 1979
1980 1980 con_log(CL_DLEVEL2, (CE_NOTE,
1981 1981 "HighQport offset :%p",
1982 1982 (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
1983 1983 con_log(CL_DLEVEL2, (CE_NOTE,
1984 1984 "LowQport offset :%p",
1985 1985 (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
1986 1986
1987 1987 cmd->sync_cmd = MRSAS_TRUE;
1988 1988 cmd->cmd_status = ENODATA;
1989 1989
1990 1990
1991 1991 mutex_enter(&instance->reg_write_mtx);
1992 1992 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
1993 1993 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
1994 1994 mutex_exit(&instance->reg_write_mtx);
1995 1995
1996 1996 con_log(CL_ANN1, (CE_NOTE,
1997 1997 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
1998 1998 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
1999 1999 (uint_t)(req_desc->Words & 0xffffffff)));
2000 2000
2001 2001 mutex_enter(&instance->int_cmd_mtx);
2002 2002 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2003 2003 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2004 2004 }
2005 2005 mutex_exit(&instance->int_cmd_mtx);
2006 2006
2007 2007
2008 2008 	if (i < (msecs - 1)) {
2009 2009 return (DDI_SUCCESS);
2010 2010 } else {
2011 2011 return (DDI_FAILURE);
2012 2012 }
2013 2013 }
2014 2014
2015 2015 /*
2016 2016 * issue_cmd_in_poll_mode
2017 2017 */
2018 2018 int
2019 2019 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2020 2020 struct mrsas_cmd *cmd)
2021 2021 {
2022 2022 int i;
2023 2023 uint16_t flags;
2024 2024 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2025 2025 struct mrsas_header *frame_hdr;
2026 2026
2027 2027 con_log(CL_ANN,
2028 2028 (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2029 2029 cmd->SMID));
2030 2030
2031 2031 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2032 2032
2033 2033 frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2034 2034 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2035 2035 MFI_CMD_STATUS_POLL_MODE);
2036 2036 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2037 2037 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2038 2038 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2039 2039
2040 2040 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2041 2041 (uint_t)(req_desc->Words & 0xffffffff)));
2042 2042 con_log(CL_ANN1, (CE_NOTE,
2043 2043 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2044 2044
2045 2045 /* issue the frame using inbound queue port */
2046 2046 mutex_enter(&instance->reg_write_mtx);
2047 2047 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2048 2048 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2049 2049 mutex_exit(&instance->reg_write_mtx);
2050 2050
2051 2051 for (i = 0; i < msecs && (
2052 2052 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2053 2053 == MFI_CMD_STATUS_POLL_MODE); i++) {
2054 2054 /* wait for cmd_status to change from 0xFF */
2055 2055 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2056 2056 }
2057 2057
2058 2058 DTRACE_PROBE1(tbolt_complete_poll_cmd, uint8_t, i);
2059 2059
2060 2060 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2061 2061 &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2062 2062 con_log(CL_ANN1, (CE_NOTE,
2063 2063 " cmd failed %" PRIx64, (req_desc->Words)));
2064 2064 return (DDI_FAILURE);
2065 2065 }
2066 2066
2067 2067 return (DDI_SUCCESS);
2068 2068 }
2069 2069
2070 2070 void
2071 2071 tbolt_enable_intr(struct mrsas_instance *instance)
2072 2072 {
2073 2073 /* TODO: For Thunderbolt/Invader also clear intr on enable */
2074 2074 	/* writel(~0, &regs->outbound_intr_status); */
2075 2075 	/* readl(&regs->outbound_intr_status); */
2076 2076
2077 2077 WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2078 2078
2079 2079 /* dummy read to force PCI flush */
2080 2080 (void) RD_OB_INTR_MASK(instance);
2081 2081
2082 2082 }
2083 2083
2084 2084 void
2085 2085 tbolt_disable_intr(struct mrsas_instance *instance)
2086 2086 {
2087 2087 uint32_t mask = 0xFFFFFFFF;
2088 2088
2089 2089 WR_OB_INTR_MASK(mask, instance);
2090 2090
2091 2091 /* Dummy readl to force pci flush */
2092 2092
2093 2093 (void) RD_OB_INTR_MASK(instance);
2094 2094 }
2095 2095
2096 2096
2097 2097 int
2098 2098 tbolt_intr_ack(struct mrsas_instance *instance)
2099 2099 {
2100 2100 uint32_t status;
2101 2101
2102 2102 /* check if it is our interrupt */
2103 2103 status = RD_OB_INTR_STATUS(instance);
2104 2104 con_log(CL_ANN1, (CE_NOTE,
2105 2105 "chkpnt: Entered tbolt_intr_ack status = %d", status));
2106 2106
2107 2107 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2108 2108 return (DDI_INTR_UNCLAIMED);
2109 2109 }
2110 2110
2111 2111 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2112 2112 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2113 2113 return (DDI_INTR_UNCLAIMED);
2114 2114 }
2115 2115
2116 2116 if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2117 2117 /* clear the interrupt by writing back the same value */
2118 2118 WR_OB_INTR_STATUS(status, instance);
2119 2119 /* dummy READ */
2120 2120 (void) RD_OB_INTR_STATUS(instance);
2121 2121 }
2122 2122 return (DDI_INTR_CLAIMED);
2123 2123 }
2124 2124
2125 2125 /*
2126 2126 * get_raid_msg_pkt : Get a command from the free pool
2127 2127 * After successful allocation, the caller of this routine
2128 2128 * must clear the frame buffer (memset to zero) before
2129 2129 * using the packet further.
2130 2130 *
2131 2131 * ***** Note *****
2132 2132  * After clearing the frame buffer, the context id of the
2133 2133  * frame buffer SHOULD be restored.
2134 2134 */
2135 2135
2136 2136 struct mrsas_cmd *
2137 2137 get_raid_msg_pkt(struct mrsas_instance *instance)
2138 2138 {
2139 2139 mlist_t *head = &instance->cmd_pool_list;
2140 2140 struct mrsas_cmd *cmd = NULL;
2141 2141
2142 2142 mutex_enter(&instance->cmd_pool_mtx);
2143 2143 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2144 2144
2145 2145
2146 2146 if (!mlist_empty(head)) {
2147 2147 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2148 2148 mlist_del_init(head->next);
2149 2149 }
2150 2150 if (cmd != NULL) {
2151 2151 cmd->pkt = NULL;
2152 2152 cmd->retry_count_for_ocr = 0;
2153 2153 cmd->drv_pkt_time = 0;
2154 2154 }
2155 2155 mutex_exit(&instance->cmd_pool_mtx);
2156 2156
2157 2157 if (cmd != NULL)
2158 2158 bzero(cmd->scsi_io_request,
2159 2159 sizeof (Mpi2RaidSCSIIORequest_t));
2160 2160 return (cmd);
2161 2161 }
2162 2162
2163 2163 struct mrsas_cmd *
2164 2164 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2165 2165 {
2166 2166 mlist_t *head = &instance->cmd_app_pool_list;
2167 2167 struct mrsas_cmd *cmd = NULL;
2168 2168
2169 2169 mutex_enter(&instance->cmd_app_pool_mtx);
2170 2170 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2171 2171
2172 2172 if (!mlist_empty(head)) {
2173 2173 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2174 2174 mlist_del_init(head->next);
2175 2175 }
2176 2176 if (cmd != NULL) {
2177 2177 cmd->retry_count_for_ocr = 0;
2178 2178 cmd->drv_pkt_time = 0;
2179 2179 cmd->pkt = NULL;
2180 2180 cmd->request_desc = NULL;
2181 2181
2182 2182 }
2183 2183
2184 2184 mutex_exit(&instance->cmd_app_pool_mtx);
2185 2185
2186 2186 if (cmd != NULL) {
2187 2187 bzero(cmd->scsi_io_request,
2188 2188 sizeof (Mpi2RaidSCSIIORequest_t));
2189 2189 }
2190 2190
2191 2191 return (cmd);
2192 2192 }
2193 2193
2194 2194 /*
2195 2195  * return_raid_msg_pkt : Return a cmd to the free command pool
2196 2196 */
2197 2197 void
2198 2198 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2199 2199 {
2200 2200 mutex_enter(&instance->cmd_pool_mtx);
2201 2201 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2202 2202
2203 2203
2204 2204 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2205 2205
2206 2206 mutex_exit(&instance->cmd_pool_mtx);
2207 2207 }
2208 2208
2209 2209 void
2210 2210 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2211 2211 {
2212 2212 mutex_enter(&instance->cmd_app_pool_mtx);
2213 2213 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2214 2214
2215 2215 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2216 2216
2217 2217 mutex_exit(&instance->cmd_app_pool_mtx);
2218 2218 }
2219 2219
2220 2220
2221 2221 void
2222 2222 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2223 2223 struct mrsas_cmd *cmd)
2224 2224 {
2225 2225 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2226 2226 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
2227 2227 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2228 2228 uint32_t index;
2229 2229 ddi_acc_handle_t acc_handle =
2230 2230 instance->mpi2_frame_pool_dma_obj.acc_handle;
2231 2231
2232 2232 if (!instance->tbolt) {
2233 2233 con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
2234 2234 return;
2235 2235 }
2236 2236
2237 2237 index = cmd->index;
2238 2238
2239 2239 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2240 2240
2241 2241 if (!ReqDescUnion) {
2242 2242 con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2243 2243 return;
2244 2244 }
2245 2245
2246 2246 con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2247 2247
2248 2248 ReqDescUnion->Words = 0;
2249 2249
2250 2250 ReqDescUnion->SCSIIO.RequestFlags =
2251 2251 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2252 2252 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2253 2253
2254 2254 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2255 2255
2256 2256 cmd->request_desc = ReqDescUnion;
2257 2257
2258 2258 /* get raid message frame pointer */
2259 2259 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2260 2260
2261 2261 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
2262 2262 Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2263 2263 &scsi_raid_io->SGL.IeeeChain;
2264 2264 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2265 2265 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2266 2266 }
2267 2267
2268 2268 ddi_put8(acc_handle, &scsi_raid_io->Function,
2269 2269 MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2270 2270
2271 2271 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2272 2272 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2273 2273
2274 2274 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2275 2275 (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2276 2276
2277 2277 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2278 2278 cmd->sense_phys_addr1);
2279 2279
2280 2280
2281 2281 scsi_raid_io_sgl_ieee =
2282 2282 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2283 2283
2284 2284 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2285 2285 (U64)cmd->frame_phys_addr);
2286 2286
2287 2287 ddi_put8(acc_handle,
2288 2288 &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2289 2289 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2290 2290 /* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
2291 2291 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2292 2292
2293 2293 con_log(CL_ANN1, (CE_NOTE,
2294 2294 "[MFI CMD PHY ADDRESS]:%" PRIx64,
2295 2295 scsi_raid_io_sgl_ieee->Address));
2296 2296 con_log(CL_ANN1, (CE_NOTE,
2297 2297 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2298 2298 con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2299 2299 scsi_raid_io_sgl_ieee->Flags));
2300 2300 }
2301 2301
2302 2302
2303 2303 void
2304 2304 tbolt_complete_cmd(struct mrsas_instance *instance,
2305 2305 struct mrsas_cmd *cmd)
2306 2306 {
2307 2307 uint8_t status;
2308 2308 uint8_t extStatus;
2309 2309 uint8_t function;
2310 2310 uint8_t arm;
2311 2311 struct scsa_cmd *acmd;
2312 2312 struct scsi_pkt *pkt;
2313 2313 struct scsi_arq_status *arqstat;
2314 2314 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2315 2315 LD_LOAD_BALANCE_INFO *lbinfo;
2316 2316 ddi_acc_handle_t acc_handle =
2317 2317 instance->mpi2_frame_pool_dma_obj.acc_handle;
2318 2318
2319 2319 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2320 2320
2321 2321 status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2322 2322 extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2323 2323
2324 2324 con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2325 2325 con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2326 2326
2327 2327 if (status != MFI_STAT_OK) {
2328 2328 con_log(CL_ANN, (CE_WARN,
2329 2329 "IO Cmd Failed SMID %x", cmd->SMID));
2330 2330 } else {
2331 2331 con_log(CL_ANN, (CE_NOTE,
2332 2332 "IO Cmd Success SMID %x", cmd->SMID));
2333 2333 }
2334 2334
2335 2335 /* regular commands */
2336 2336
2337 2337 function = ddi_get8(acc_handle, &scsi_raid_io->Function);
2338 2338 DTRACE_PROBE3(tbolt_complete_cmd, uint8_t, function,
2339 2339 uint8_t, status, uint8_t, extStatus);
2340 2340
2341 2341 switch (function) {
2342 2342
2343 2343 case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
2344 2344 acmd = (struct scsa_cmd *)cmd->cmd;
2345 2345 lbinfo = &instance->load_balance_info[acmd->device_id];
2346 2346
2347 2347 if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2348 2348 arm = lbinfo->raid1DevHandle[0] ==
2349 2349 scsi_raid_io->DevHandle ? 0 : 1;
2350 2350
2351 2351 lbinfo->scsi_pending_cmds[arm]--;
2352 2352 cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2353 2353 }
2354 2354 con_log(CL_DLEVEL3, (CE_NOTE,
2355 2355 "FastPath IO Completion Success "));
2356 2356 /* FALLTHRU */
2357 2357
2358 2358 case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
2359 2359 acmd = (struct scsa_cmd *)cmd->cmd;
2360 2360 pkt = (struct scsi_pkt *)CMD2PKT(acmd);
2361 2361
2362 2362 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2363 2363 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2364 2364 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2365 2365 acmd->cmd_dma_offset, acmd->cmd_dma_len,
2366 2366 DDI_DMA_SYNC_FORCPU);
2367 2367 }
2368 2368 }
2369 2369
2370 2370 pkt->pkt_reason = CMD_CMPLT;
2371 2371 pkt->pkt_statistics = 0;
2372 2372 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2373 2373 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2374 2374
2375 2375 con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2376 2376 "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2377 2377 ((acmd->islogical) ? "LD" : "PD"),
2378 2378 acmd->cmd_dmacount, cmd->SMID, status));
2379 2379
2380 2380 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2381 2381 struct scsi_inquiry *inq;
2382 2382
2383 2383 if (acmd->cmd_dmacount != 0) {
2384 2384 bp_mapin(acmd->cmd_buf);
2385 2385 inq = (struct scsi_inquiry *)
2386 2386 acmd->cmd_buf->b_un.b_addr;
2387 2387
2388 2388 /* don't expose physical drives to OS */
2389 2389 if (acmd->islogical &&
2390 2390 (status == MFI_STAT_OK)) {
2391 2391 display_scsi_inquiry((caddr_t)inq);
2392 2392 #ifdef PDSUPPORT
2393 2393 } else if ((status == MFI_STAT_OK) &&
2394 2394 inq->inq_dtype == DTYPE_DIRECT) {
2395 2395 display_scsi_inquiry((caddr_t)inq);
2396 2396 #endif
2397 2397 } else {
2398 2398 /* for physical disk */
2399 2399 status = MFI_STAT_DEVICE_NOT_FOUND;
2400 2400 }
2401 2401 }
2402 2402 }
2403 2403
2404 2404 switch (status) {
2405 2405 case MFI_STAT_OK:
2406 2406 pkt->pkt_scbp[0] = STATUS_GOOD;
2407 2407 break;
2408 2408 case MFI_STAT_LD_CC_IN_PROGRESS:
2409 2409 case MFI_STAT_LD_RECON_IN_PROGRESS:
2410 2410 pkt->pkt_scbp[0] = STATUS_GOOD;
2411 2411 break;
2412 2412 case MFI_STAT_LD_INIT_IN_PROGRESS:
2413 2413 pkt->pkt_reason = CMD_TRAN_ERR;
2414 2414 break;
2415 2415 case MFI_STAT_SCSI_IO_FAILED:
2416 2416 dev_err(instance->dip, CE_WARN,
2417 2417 "tbolt_complete_cmd: scsi_io failed");
2418 2418 pkt->pkt_reason = CMD_TRAN_ERR;
2419 2419 break;
2420 2420 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2421 2421 con_log(CL_ANN, (CE_WARN,
2422 2422 "tbolt_complete_cmd: scsi_done with error"));
2423 2423
2424 2424 pkt->pkt_reason = CMD_CMPLT;
2425 2425 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2426 2426
2427 2427 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2428 2428 con_log(CL_ANN,
2429 2429 (CE_WARN, "TEST_UNIT_READY fail"));
2430 2430 } else {
2431 2431 pkt->pkt_state |= STATE_ARQ_DONE;
2432 2432 arqstat = (void *)(pkt->pkt_scbp);
2433 2433 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2434 2434 arqstat->sts_rqpkt_resid = 0;
2435 2435 arqstat->sts_rqpkt_state |=
2436 2436 STATE_GOT_BUS | STATE_GOT_TARGET
2437 2437 | STATE_SENT_CMD
2438 2438 | STATE_XFERRED_DATA;
2439 2439 *(uint8_t *)&arqstat->sts_rqpkt_status =
2440 2440 STATUS_GOOD;
2441 2441 con_log(CL_ANN1,
2442 2442 (CE_NOTE, "Copying Sense data %x",
2443 2443 cmd->SMID));
2444 2444
2445 2445 ddi_rep_get8(acc_handle,
2446 2446 (uint8_t *)&(arqstat->sts_sensedata),
2447 2447 cmd->sense1,
2448 2448 sizeof (struct scsi_extended_sense),
2449 2449 DDI_DEV_AUTOINCR);
2450 2450
2451 2451 }
2452 2452 break;
2453 2453 case MFI_STAT_LD_OFFLINE:
2454 2454 dev_err(instance->dip, CE_WARN,
2455 2455 "tbolt_complete_cmd: ld offline "
2456 2456 "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
2458 2458 ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2459 2459
2460 2460 ddi_get16(acc_handle,
2461 2461 &scsi_raid_io->RaidContext.ldTargetId),
2462 2462
2463 2463 ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2464 2464
2465 2465 pkt->pkt_reason = CMD_DEV_GONE;
2466 2466 pkt->pkt_statistics = STAT_DISCON;
2467 2467 break;
2468 2468 case MFI_STAT_DEVICE_NOT_FOUND:
2469 2469 con_log(CL_ANN, (CE_CONT,
2470 2470 "tbolt_complete_cmd: device not found error"));
2471 2471 pkt->pkt_reason = CMD_DEV_GONE;
2472 2472 pkt->pkt_statistics = STAT_DISCON;
2473 2473 break;
2474 2474
2475 2475 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2476 2476 pkt->pkt_state |= STATE_ARQ_DONE;
2477 2477 pkt->pkt_reason = CMD_CMPLT;
2478 2478 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2479 2479
2480 2480 arqstat = (void *)(pkt->pkt_scbp);
2481 2481 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2482 2482 arqstat->sts_rqpkt_resid = 0;
2483 2483 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2484 2484 | STATE_GOT_TARGET | STATE_SENT_CMD
2485 2485 | STATE_XFERRED_DATA;
2486 2486 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2487 2487
2488 2488 arqstat->sts_sensedata.es_valid = 1;
2489 2489 arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2490 2490 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2491 2491
2492 2492 /*
2493 2493 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2494 2494 * ASC: 0x21h; ASCQ: 0x00h;
2495 2495 */
2496 2496 arqstat->sts_sensedata.es_add_code = 0x21;
2497 2497 arqstat->sts_sensedata.es_qual_code = 0x00;
2498 2498 break;
2499 2499 case MFI_STAT_INVALID_CMD:
2500 2500 case MFI_STAT_INVALID_DCMD:
2501 2501 case MFI_STAT_INVALID_PARAMETER:
2502 2502 case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2503 2503 default:
2504 2504 dev_err(instance->dip, CE_WARN,
2505 2505 			    "tbolt_complete_cmd: unhandled status 0x%x", status);
2506 2506 pkt->pkt_reason = CMD_TRAN_ERR;
2507 2507
2508 2508 break;
2509 2509 }
2510 2510
2511 2511 atomic_add_16(&instance->fw_outstanding, (-1));
2512 2512
2513 2513 (void) mrsas_common_check(instance, cmd);
2514 2514 if (acmd->cmd_dmahandle) {
2515 2515 if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2516 2516 DDI_SUCCESS) {
2517 2517 ddi_fm_service_impact(instance->dip,
2518 2518 DDI_SERVICE_UNAFFECTED);
2519 2519 pkt->pkt_reason = CMD_TRAN_ERR;
2520 2520 pkt->pkt_statistics = 0;
2521 2521 }
2522 2522 }
2523 2523
2524 2524 /* Call the callback routine */
2525 2525 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2526 2526 (*pkt->pkt_comp)(pkt);
2527 2527
2528 2528 con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2529 2529
2530 2530 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2531 2531
2532 2532 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2533 2533
2534 2534 return_raid_msg_pkt(instance, cmd);
2535 2535 break;
2536 2536 }
2537 2537 case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */
2538 2538
2539 2539 if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2540 2540 cmd->frame->dcmd.mbox.b[1] == 1) {
2541 2541
2542 2542 mutex_enter(&instance->sync_map_mtx);
2543 2543
2544 2544 con_log(CL_ANN, (CE_NOTE,
2545 2545 "LDMAP sync command SMID RECEIVED 0x%X",
2546 2546 cmd->SMID));
2547 2547 if (cmd->frame->hdr.cmd_status != 0) {
2548 2548 dev_err(instance->dip, CE_WARN,
2549 2549 "map sync failed, status = 0x%x.",
2550 2550 cmd->frame->hdr.cmd_status);
2551 2551 } else {
2552 2552 instance->map_id++;
2553 2553 con_log(CL_ANN1, (CE_NOTE,
2554 2554 "map sync received, switched map_id to %"
2555 2555 PRIu64, instance->map_id));
2556 2556 }
2557 2557
2558 2558 if (MR_ValidateMapInfo(
2559 2559 instance->ld_map[instance->map_id & 1],
2560 2560 instance->load_balance_info)) {
2561 2561 instance->fast_path_io = 1;
2562 2562 } else {
2563 2563 instance->fast_path_io = 0;
2564 2564 }
2565 2565
2566 2566 con_log(CL_ANN, (CE_NOTE,
2567 2567 "instance->fast_path_io %d",
2568 2568 instance->fast_path_io));
2569 2569
2570 2570 instance->unroll.syncCmd = 0;
2571 2571
2572 2572 if (instance->map_update_cmd == cmd) {
2573 2573 return_raid_msg_pkt(instance, cmd);
2574 2574 atomic_add_16(&instance->fw_outstanding, (-1));
2575 2575 (void) mrsas_tbolt_sync_map_info(instance);
2576 2576 }
2577 2577
2578 2578 con_log(CL_ANN1, (CE_NOTE,
2579 2579 "LDMAP sync completed, ldcount=%d",
2580 2580 instance->ld_map[instance->map_id & 1]
2581 2581 ->raidMap.ldCount));
2582 2582 mutex_exit(&instance->sync_map_mtx);
2583 2583 break;
2584 2584 }
2585 2585
2586 2586 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2587 2587 con_log(CL_ANN1, (CE_CONT,
2588 2588 "AEN command SMID RECEIVED 0x%X",
2589 2589 cmd->SMID));
2590 2590 if ((instance->aen_cmd == cmd) &&
2591 2591 (instance->aen_cmd->abort_aen)) {
2592 2592 con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
2593 2593 "aborted_aen returned"));
2594 2594 } else {
2595 2595 atomic_add_16(&instance->fw_outstanding, (-1));
2596 2596 service_mfi_aen(instance, cmd);
2597 2597 }
2598 2598 }
2599 2599
2600 2600 if (cmd->sync_cmd == MRSAS_TRUE) {
2601 2601 con_log(CL_ANN1, (CE_CONT,
2602 2602 "Sync-mode Command Response SMID RECEIVED 0x%X",
2603 2603 cmd->SMID));
2604 2604
2605 2605 tbolt_complete_cmd_in_sync_mode(instance, cmd);
2606 2606 } else {
2607 2607 con_log(CL_ANN, (CE_CONT,
2608 2608 "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2609 2609 cmd->SMID));
2610 2610 }
2611 2611 break;
2612 2612 default:
2613 2613 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2614 2614 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2615 2615
2616 2616 /* free message */
2617 2617 con_log(CL_ANN,
2618 2618 	    (CE_NOTE, "tbolt_complete_cmd: Unknown function type!"));
2619 2619 break;
2620 2620 }
2621 2621 }
2622 2622
2623 2623 uint_t
2624 2624 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2625 2625 {
2626 2626 uint8_t replyType;
2627 2627 Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2628 2628 Mpi2ReplyDescriptorsUnion_t *desc;
2629 2629 uint16_t smid;
2630 2630 union desc_value d_val;
2631 2631 struct mrsas_cmd *cmd;
2632 2632
2633 2633 struct mrsas_header *hdr;
2634 2634 struct scsi_pkt *pkt;
2635 2635
2636 2636 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2637 2637 0, 0, DDI_DMA_SYNC_FORDEV);
2638 2638
2639 2639 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2640 2640 0, 0, DDI_DMA_SYNC_FORCPU);
2641 2641
2642 2642 desc = instance->reply_frame_pool;
2643 2643 desc += instance->reply_read_index;
2644 2644
2645 2645 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2646 2646 replyType = replyDesc->ReplyFlags &
2647 2647 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2648 2648
2649 2649 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2650 2650 return (DDI_INTR_UNCLAIMED);
2651 2651
2652 2652 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2653 2653 != DDI_SUCCESS) {
2654 2654 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2655 2655 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2656 2656 con_log(CL_ANN1,
2657 2657 (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2658 2658 "FMA check, returning DDI_INTR_UNCLAIMED"));
2659 2659 return (DDI_INTR_CLAIMED);
2660 2660 }
2661 2661
2662 2662 con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64,
2663 2663 (void *)desc, desc->Words));
2664 2664
2665 2665 d_val.word = desc->Words;
2666 2666
2667 2667
2668 2668 /* Read Reply descriptor */
2669 2669 while ((d_val.u1.low != 0xffffffff) &&
2670 2670 (d_val.u1.high != 0xffffffff)) {
2671 2671
2672 2672 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2673 2673 0, 0, DDI_DMA_SYNC_FORCPU);
2674 2674
2675 2675 smid = replyDesc->SMID;
2676 2676
2677 2677 if (!smid || smid > instance->max_fw_cmds + 1) {
2678 2678 con_log(CL_ANN1, (CE_NOTE,
2679 2679 "Reply Desc at Break = %p Words = %" PRIx64,
2680 2680 (void *)desc, desc->Words));
2681 2681 break;
2682 2682 }
2683 2683
2684 2684 cmd = instance->cmd_list[smid - 1];
2685 2685 if (!cmd) {
2686 2686 con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2687 2687 			    "outstanding_cmd: Invalid command "
2688 2688 			    "or poll command received in completion path"));
2689 2689 } else {
2690 2690 mutex_enter(&instance->cmd_pend_mtx);
2691 2691 if (cmd->sync_cmd == MRSAS_TRUE) {
2692 2692 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2693 2693 if (hdr) {
2694 2694 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2695 2695 "tbolt_process_outstanding_cmd:"
2696 2696 " mlist_del_init(&cmd->list)."));
2697 2697 mlist_del_init(&cmd->list);
2698 2698 }
2699 2699 } else {
2700 2700 pkt = cmd->pkt;
2701 2701 if (pkt) {
2702 2702 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2703 2703 "tbolt_process_outstanding_cmd:"
2704 2704 "mlist_del_init(&cmd->list)."));
2705 2705 mlist_del_init(&cmd->list);
2706 2706 }
2707 2707 }
2708 2708
2709 2709 mutex_exit(&instance->cmd_pend_mtx);
2710 2710
2711 2711 tbolt_complete_cmd(instance, cmd);
2712 2712 }
2713 2713 /* set it back to all 1s. */
2714 2714 desc->Words = -1LL;
2715 2715
2716 2716 instance->reply_read_index++;
2717 2717
2718 2718 if (instance->reply_read_index >= (instance->reply_q_depth)) {
2719 2719 con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2720 2720 instance->reply_read_index = 0;
2721 2721 }
2722 2722
2723 2723 /* Get the next reply descriptor */
2724 2724 if (!instance->reply_read_index)
2725 2725 desc = instance->reply_frame_pool;
2726 2726 else
2727 2727 desc++;
2728 2728
2729 2729 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2730 2730
2731 2731 d_val.word = desc->Words;
2732 2732
2733 2733 con_log(CL_ANN1, (CE_NOTE,
2734 2734 "Next Reply Desc = %p Words = %" PRIx64,
2735 2735 (void *)desc, desc->Words));
2736 2736
2737 2737 replyType = replyDesc->ReplyFlags &
2738 2738 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2739 2739
2740 2740 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2741 2741 break;
2742 2742
2743 2743 } /* End of while loop. */
2744 2744
2745 2745 /* update replyIndex to FW */
2746 2746 WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2747 2747
2748 2748
2749 2749 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2750 2750 0, 0, DDI_DMA_SYNC_FORDEV);
2751 2751
2752 2752 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2753 2753 0, 0, DDI_DMA_SYNC_FORCPU);
2754 2754 return (DDI_INTR_CLAIMED);
2755 2755 }
2756 2756
2757 2757
2758 2758
2759 2759
2760 2760 /*
2761 2761  * tbolt_complete_cmd_in_sync_mode -	Completes an internal command
2762 2762 * @instance: Adapter soft state
2763 2763 * @cmd: Command to be completed
2764 2764 *
2765 2765  * The tbolt_issue_cmd_in_sync_mode() function waits for a command to
2766 2766  * complete after it issues it. This function wakes up that waiting
2767 2767  * routine by calling cv_broadcast() on the int_cmd_cv condition variable.
2768 2768 */
2769 2769 void
2770 2770 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2771 2771 struct mrsas_cmd *cmd)
2772 2772 {
2773 2773
2774 2774 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2775 2775 &cmd->frame->io.cmd_status);
2776 2776
2777 2777 cmd->sync_cmd = MRSAS_FALSE;
2778 2778
2779 2779 mutex_enter(&instance->int_cmd_mtx);
2780 2780 if (cmd->cmd_status == ENODATA) {
2781 2781 cmd->cmd_status = 0;
2782 2782 }
2783 2783 cv_broadcast(&instance->int_cmd_cv);
2784 2784 mutex_exit(&instance->int_cmd_mtx);
2785 2785
2786 2786 }
2787 2787
2788 2788 /*
2789 2789 * mrsas_tbolt_get_ld_map_info - Returns ld_map structure
2790 2790 * instance: Adapter soft state
2791 2791 *
2792 2792  * Issues an internal command (DCMD) to fetch the FW's logical drive
2793 2793  * (RAID) map structure. The map is then validated to decide whether
2794 2794  * fast-path I/O can be used.
2795 2795 */
2796 2796 int
2797 2797 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2798 2798 {
2799 2799 int ret = 0;
2800 2800 struct mrsas_cmd *cmd = NULL;
2801 2801 struct mrsas_dcmd_frame *dcmd;
2802 2802 MR_FW_RAID_MAP_ALL *ci;
2803 2803 uint32_t ci_h = 0;
2804 2804 U32 size_map_info;
2805 2805
2806 2806 cmd = get_raid_msg_pkt(instance);
2807 2807
2808 2808 if (cmd == NULL) {
2809 2809 dev_err(instance->dip, CE_WARN,
2810 2810 "Failed to get a cmd from free-pool in get_ld_map_info()");
2811 2811 return (DDI_FAILURE);
2812 2812 }
2813 2813
2814 2814 dcmd = &cmd->frame->dcmd;
2815 2815
2816 2816 size_map_info = sizeof (MR_FW_RAID_MAP) +
2817 2817 (sizeof (MR_LD_SPAN_MAP) *
2818 2818 (MAX_LOGICAL_DRIVES - 1));
2819 2819
2820 2820 con_log(CL_ANN, (CE_NOTE,
2821 2821 "size_map_info : 0x%x", size_map_info));
2822 2822
2823 2823 ci = instance->ld_map[instance->map_id & 1];
2824 2824 ci_h = instance->ld_map_phy[instance->map_id & 1];
2825 2825
2826 2826 if (!ci) {
2827 2827 dev_err(instance->dip, CE_WARN,
2828 2828 "Failed to alloc mem for ld_map_info");
2829 2829 return_raid_msg_pkt(instance, cmd);
2830 2830 return (-1);
2831 2831 }
2832 2832
2833 2833 bzero(ci, sizeof (*ci));
2834 2834 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2835 2835
2836 2836 dcmd->cmd = MFI_CMD_OP_DCMD;
2837 2837 dcmd->cmd_status = 0xFF;
2838 2838 dcmd->sge_count = 1;
2839 2839 dcmd->flags = MFI_FRAME_DIR_READ;
2840 2840 dcmd->timeout = 0;
2841 2841 dcmd->pad_0 = 0;
2842 2842 dcmd->data_xfer_len = size_map_info;
2843 2843 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2844 2844 dcmd->sgl.sge32[0].phys_addr = ci_h;
2845 2845 dcmd->sgl.sge32[0].length = size_map_info;
2846 2846
2847 2847
2848 2848 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2849 2849
2850 2850 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2851 2851 ret = 0;
2852 2852 con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2853 2853 } else {
2854 2854 dev_err(instance->dip, CE_WARN, "Get LD Map Info failed");
2855 2855 ret = -1;
2856 2856 }
2857 2857
2858 2858 return_raid_msg_pkt(instance, cmd);
2859 2859
2860 2860 return (ret);
2861 2861 }
2862 2862
2863 2863 void
2864 2864 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2865 2865 {
2866 2866 uint32_t i;
2867 2867 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2868 2868 union desc_value d_val;
2869 2869
2870 2870 reply_desc = instance->reply_frame_pool;
2871 2871
2872 2872 for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2873 2873 d_val.word = reply_desc->Words;
2874 2874 con_log(CL_DLEVEL3, (CE_NOTE,
2875 2875 "i=%d, %x:%x",
2876 2876 i, d_val.u1.high, d_val.u1.low));
2877 2877 }
2878 2878 }
2879 2879
2880 2880 /*
2881 2881  * mrsas_tbolt_prepare_cdb - Prepare a 32-byte CDB for fast-path DIF I/O.
2882 2882  * @io_info:	MegaRAID IO request packet pointer.
2883 2883  * @ref_tag:	Reference tag for RD/WRPROTECT
2884 2884  *
2885 2885  * Builds the variable-length CDB and EEDP fields for a fast-path command.
2886 2886 */
2887 2887 void
2888 2888 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2889 2889 struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2890 2890 U32 ref_tag)
2891 2891 {
2892 2892 uint16_t EEDPFlags;
2893 2893 uint32_t Control;
2894 2894 ddi_acc_handle_t acc_handle =
2895 2895 instance->mpi2_frame_pool_dma_obj.acc_handle;
2896 2896
2897 2897 /* Prepare 32-byte CDB if DIF is supported on this device */
2898 2898 con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2899 2899
2900 2900 bzero(cdb, 32);
2901 2901
2902 2902 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2903 2903
2904 2904
2905 2905 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
2906 2906
2907 2907 if (io_info->isRead)
2908 2908 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2909 2909 else
2910 2910 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2911 2911
2912 2912 	/* The Linux driver sets this to MEGASAS_RD_WR_PROTECT_CHECK_ALL. */
2913 2913 cdb[10] = MRSAS_RD_WR_PROTECT;
2914 2914
2915 2915 /* LOGICAL BLOCK ADDRESS */
2916 2916 cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2917 2917 cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2918 2918 cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2919 2919 cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2920 2920 cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2921 2921 cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2922 2922 cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2923 2923 cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2924 2924
2925 2925 /* Logical block reference tag */
2926 2926 ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2927 2927 BE_32(ref_tag));
2928 2928
2929 2929 ddi_put16(acc_handle,
2930 2930 &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
2931 2931
2932 2932 ddi_put32(acc_handle, &scsi_io_request->DataLength,
2933 2933 ((io_info->numBlocks)*512));
2934 2934 /* Specify 32-byte cdb */
2935 2935 ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
2936 2936
2937 2937 /* Transfer length */
2938 2938 cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
2939 2939 cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
2940 2940 cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
2941 2941 cdb[31] = (U8)((io_info->numBlocks) & 0xff);
2942 2942
2943 2943 /* set SCSI IO EEDPFlags */
2944 2944 EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
2945 2945 Control = ddi_get32(acc_handle, &scsi_io_request->Control);
2946 2946
2947 2947 /* set SCSI IO EEDPFlags bits */
2948 2948 if (io_info->isRead) {
2949 2949 /*
2950 2950 * For READ commands, the EEDPFlags shall be set to specify to
2951 2951 * Increment the Primary Reference Tag, to Check the Reference
2952 2952 * Tag, and to Check and Remove the Protection Information
2953 2953 * fields.
2954 2954 */
2955 2955 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2956 2956 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2957 2957 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
2958 2958 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
2959 2959 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2960 2960 } else {
2961 2961 /*
2962 2962 * For WRITE commands, the EEDPFlags shall be set to specify to
2963 2963 * Increment the Primary Reference Tag, and to Insert
2964 2964 * Protection Information fields.
2965 2965 */
2966 2966 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2967 2967 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2968 2968 }
2969 2969 Control |= (0x4 << 26);
2970 2970
2971 2971 ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
2972 2972 ddi_put32(acc_handle, &scsi_io_request->Control, Control);
2973 2973 ddi_put32(acc_handle,
2974 2974 &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
2975 2975 }
2976 2976
2977 2977
2978 2978 /*
2979 2979 * mrsas_tbolt_set_pd_lba - Sets PD LBA
2980 2980 * @cdb: CDB
2981 2981 * @cdb_len: cdb length
2982 2982 * @start_blk: Start block of IO
2983 2983 *
2984 2984 * Used to set the PD LBA in CDB for FP IOs
2985 2985 */
2986 2986 static void
2987 2987 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
2988 2988 U32 num_blocks)
2989 2989 {
2990 2990 U8 cdb_len = *cdb_len_ptr;
2991 2991 U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
2992 2992
2993 2993 	/* Some drives don't support 16/12-byte CDBs; convert to 10-byte. */
2994 2994 if (((cdb_len == 12) || (cdb_len == 16)) &&
2995 2995 (start_blk <= 0xffffffff)) {
2996 2996 if (cdb_len == 16) {
2997 2997 con_log(CL_ANN,
2998 2998 			    (CE_NOTE, "Converting READ/WRITE(16) to (10)"));
2999 2999 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
3000 3000 flagvals = cdb[1];
3001 3001 groupnum = cdb[14];
3002 3002 control = cdb[15];
3003 3003 } else {
3004 3004 con_log(CL_ANN,
3005 3005 			    (CE_NOTE, "Converting READ/WRITE(12) to (10)"));
3006 3006 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3007 3007 flagvals = cdb[1];
3008 3008 groupnum = cdb[10];
3009 3009 control = cdb[11];
3010 3010 }
3011 3011
3012 3012 		bzero(cdb, 32);	/* cdb[] decays to a pointer; zero all 32 bytes */
3013 3013
3014 3014 cdb[0] = opcode;
3015 3015 cdb[1] = flagvals;
3016 3016 cdb[6] = groupnum;
3017 3017 cdb[9] = control;
3018 3018 /* Set transfer length */
3019 3019 cdb[8] = (U8)(num_blocks & 0xff);
3020 3020 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3021 3021 cdb_len = 10;
3022 3022 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3023 3023 /* Convert to 16 byte CDB for large LBA's */
3024 3024 con_log(CL_ANN,
3025 3025 (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
3026 3026 switch (cdb_len) {
3027 3027 case 6:
3028 3028 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3029 3029 control = cdb[5];
3030 3030 break;
3031 3031 case 10:
3032 3032 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3033 3033 flagvals = cdb[1];
3034 3034 groupnum = cdb[6];
3035 3035 control = cdb[9];
3036 3036 break;
3037 3037 case 12:
3038 3038 opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3039 3039 flagvals = cdb[1];
3040 3040 groupnum = cdb[10];
3041 3041 control = cdb[11];
3042 3042 break;
3043 3043 }
3044 3044
3045 3045 		bzero(cdb, 32);	/* cdb[] decays to a pointer; zero all 32 bytes */
3046 3046
3047 3047 cdb[0] = opcode;
3048 3048 cdb[1] = flagvals;
3049 3049 cdb[14] = groupnum;
3050 3050 cdb[15] = control;
3051 3051
3052 3052 /* Transfer length */
3053 3053 cdb[13] = (U8)(num_blocks & 0xff);
3054 3054 cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3055 3055 cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3056 3056 cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3057 3057
3058 3058 /* Specify 16-byte cdb */
3059 3059 cdb_len = 16;
3060 3060 } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3061 3061 /* convert to 10 byte CDB */
3062 3062 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3063 3063 control = cdb[5];
3064 3064
3065 3065 		bzero(cdb, 32);	/* cdb[] decays to a pointer; zero all 32 bytes */
3066 3066 cdb[0] = opcode;
3067 3067 cdb[9] = control;
3068 3068
3069 3069 /* Set transfer length */
3070 3070 cdb[8] = (U8)(num_blocks & 0xff);
3071 3071 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3072 3072
3073 3073 /* Specify 10-byte cdb */
3074 3074 cdb_len = 10;
3075 3075 }
3076 3076
3077 3077
3078 3078 	/* Normal case falls through to here: just load the LBA. */
3079 3079 switch (cdb_len) {
3080 3080 case 6:
3081 3081 {
3082 3082 U8 val = cdb[1] & 0xE0;
3083 3083 cdb[3] = (U8)(start_blk & 0xff);
3084 3084 cdb[2] = (U8)((start_blk >> 8) & 0xff);
3085 3085 cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3086 3086 break;
3087 3087 }
3088 3088 case 10:
3089 3089 cdb[5] = (U8)(start_blk & 0xff);
3090 3090 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3091 3091 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3092 3092 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3093 3093 break;
3094 3094 case 12:
3095 3095 cdb[5] = (U8)(start_blk & 0xff);
3096 3096 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3097 3097 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3098 3098 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3099 3099 break;
3100 3100
3101 3101 case 16:
3102 3102 cdb[9] = (U8)(start_blk & 0xff);
3103 3103 cdb[8] = (U8)((start_blk >> 8) & 0xff);
3104 3104 cdb[7] = (U8)((start_blk >> 16) & 0xff);
3105 3105 cdb[6] = (U8)((start_blk >> 24) & 0xff);
3106 3106 cdb[5] = (U8)((start_blk >> 32) & 0xff);
3107 3107 cdb[4] = (U8)((start_blk >> 40) & 0xff);
3108 3108 cdb[3] = (U8)((start_blk >> 48) & 0xff);
3109 3109 cdb[2] = (U8)((start_blk >> 56) & 0xff);
3110 3110 break;
3111 3111 }
3112 3112
3113 3113 *cdb_len_ptr = cdb_len;
3114 3114 }
3115 3115
3116 3116
3117 3117 static int
3118 3118 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3119 3119 {
3120 3120 MR_FW_RAID_MAP_ALL *ld_map;
3121 3121
3122 3122 if (!mrsas_tbolt_get_ld_map_info(instance)) {
3123 3123
3124 3124 ld_map = instance->ld_map[instance->map_id & 1];
3125 3125
3126 3126 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3127 3127 ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3128 3128
3129 3129 if (MR_ValidateMapInfo(
3130 3130 instance->ld_map[instance->map_id & 1],
3131 3131 instance->load_balance_info)) {
3132 3132 con_log(CL_ANN,
3133 3133 (CE_CONT, "MR_ValidateMapInfo success"));
3134 3134
3135 3135 instance->fast_path_io = 1;
3136 3136 con_log(CL_ANN,
3137 3137 (CE_NOTE, "instance->fast_path_io %d",
3138 3138 instance->fast_path_io));
3139 3139
3140 3140 return (DDI_SUCCESS);
3141 3141 }
3142 3142
3143 3143 }
3144 3144
3145 3145 instance->fast_path_io = 0;
3146 3146 dev_err(instance->dip, CE_WARN, "MR_ValidateMapInfo failed");
3147 3147 con_log(CL_ANN, (CE_NOTE,
3148 3148 "instance->fast_path_io %d", instance->fast_path_io));
3149 3149
3150 3150 return (DDI_FAILURE);
3151 3151 }
3152 3152
3153 3153 /*
3154 3154  * Marks the HBA as bad. This is called either when an I/O packet
3155 3155  * times out even after 3 FW resets, or when the FW is found to be
3156 3156  * faulted even after 3 consecutive resets.
3157 3157 */
3158 3158
3159 3159 void
3160 3160 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3161 3161 {
3162 3162 dev_err(instance->dip, CE_NOTE, "TBOLT Kill adapter called");
3163 3163
3164 3164 if (instance->deadadapter == 1)
3165 3165 return;
3166 3166
3167 3167 con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3168 3168 "Writing to doorbell with MFI_STOP_ADP "));
3169 3169 mutex_enter(&instance->ocr_flags_mtx);
3170 3170 instance->deadadapter = 1;
3171 3171 mutex_exit(&instance->ocr_flags_mtx);
3172 3172 instance->func_ptr->disable_intr(instance);
3173 3173 WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3174 3174 /* Flush */
3175 3175 (void) RD_RESERVED0_REGISTER(instance);
3176 3176
3177 3177 (void) mrsas_print_pending_cmds(instance);
3178 3178 (void) mrsas_complete_pending_cmds(instance);
3179 3179 }
3180 3180
3181 3181 void
3182 3182 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3183 3183 {
3184 3184 int i;
3185 3185 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3186 3186 instance->reply_read_index = 0;
3187 3187
3188 3188 /* initializing reply address to 0xFFFFFFFF */
3189 3189 reply_desc = instance->reply_frame_pool;
3190 3190
3191 3191 for (i = 0; i < instance->reply_q_depth; i++) {
3192 3192 reply_desc->Words = (uint64_t)~0;
3193 3193 reply_desc++;
3194 3194 }
3195 3195 }
3196 3196
3197 3197 int
3198 3198 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3199 3199 {
3200 3200 uint32_t status = 0x00;
3201 3201 uint32_t retry = 0;
3202 3202 uint32_t cur_abs_reg_val;
3203 3203 uint32_t fw_state;
3204 3204 uint32_t abs_state;
3205 3205 uint32_t i;
3206 3206
3207 3207 con_log(CL_ANN, (CE_NOTE,
3208 3208 "mrsas_tbolt_reset_ppc entered"));
3209 3209
3210 3210 if (instance->deadadapter == 1) {
3211 3211 dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
3212 3212 "no more resets as HBA has been marked dead ");
3213 3213 return (DDI_FAILURE);
3214 3214 }
3215 3215
3216 3216 mutex_enter(&instance->ocr_flags_mtx);
3217 3217 instance->adapterresetinprogress = 1;
3218 3218 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc:"
3219 3219 "adpterresetinprogress flag set, time %llx", gethrtime()));
3220 3220 mutex_exit(&instance->ocr_flags_mtx);
3221 3221
3222 3222 instance->func_ptr->disable_intr(instance);
3223 3223
3224 3224 /* Delay ~3 seconds (3000 x 1 ms) so in-flight ioctl and I/O cmds complete. */
3225 3225 for (i = 0; i < 3000; i++) {
3226 3226 drv_usecwait(MILLISEC); /* busy-wait for 1000 usecs */
3227 3227 }
3228 3228
3229 3229 instance->reply_read_index = 0;
3230 3230
3231 3231 retry_reset:
3232 3232 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3233 3233 ":Resetting TBOLT "));
3234 3234
3235 3235 WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3236 3236 WR_TBOLT_IB_WRITE_SEQ(4, instance);
3237 3237 WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3238 3238 WR_TBOLT_IB_WRITE_SEQ(2, instance);
3239 3239 WR_TBOLT_IB_WRITE_SEQ(7, instance);
3240 3240 WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3241 3241 con_log(CL_ANN1, (CE_NOTE,
3242 3242 "mrsas_tbolt_reset_ppc: magic number written "
3243 3243 "to write sequence register"));
3244 3244 delay(100 * drv_usectohz(MILLISEC));
3245 3245 status = RD_TBOLT_HOST_DIAG(instance);
3246 3246 con_log(CL_ANN1, (CE_NOTE,
3247 3247 "mrsas_tbolt_reset_ppc: READ HOSTDIAG SUCCESS "
3248 3248 "to write sequence register"));
3249 3249
3250 3250 while (status & DIAG_TBOLT_RESET_ADAPTER) {
3251 3251 delay(100 * drv_usectohz(MILLISEC));
3252 3252 status = RD_TBOLT_HOST_DIAG(instance);
3253 3253 if (retry++ == 100) {
3254 3254 dev_err(instance->dip, CE_WARN,
3255 3255 "mrsas_tbolt_reset_ppc:"
3256 3256 "resetadapter bit is set already "
3257 3257 "check retry count %d", retry);
3258 3258 return (DDI_FAILURE);
3259 3259 }
3260 3260 }
3261 3261
3262 3262 WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3263 3263 delay(100 * drv_usectohz(MILLISEC));
3264 3264
3265 3265 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3266 3266 (uint8_t *)((uintptr_t)(instance)->regmap +
3267 3267 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3268 3268
3269 3269 while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3270 3270 delay(100 * drv_usectohz(MILLISEC));
3271 3271 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3272 3272 (uint8_t *)((uintptr_t)(instance)->regmap +
3273 3273 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3274 3274 if (retry++ == 100) {
3275 3275 /* Don't kill the adapter here; the RESET_ADAPTER */
3276 3276 /* bit is cleared by firmware. */
3277 3277 /* mrsas_tbolt_kill_adapter(instance); */
3278 3278 dev_err(instance->dip, CE_WARN,
3279 3279 "%s(): RESET FAILED; return failure!!!", __func__);
3280 3280 return (DDI_FAILURE);
3281 3281 }
3282 3282 }
3283 3283
3284 3284 con_log(CL_ANN,
3285 3285 (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3286 3286 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3287 3287 "Calling mfi_state_transition_to_ready"));
3288 3288
3289 3289 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3290 3290 retry = 0;
3291 3291 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3292 3292 delay(100 * drv_usectohz(MILLISEC));
3293 3293 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3294 3294 }
3295 3295 if (abs_state <= MFI_STATE_FW_INIT) {
3296 3296 dev_err(instance->dip, CE_WARN,
3297 3297 "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
3298 3298 "state = 0x%x, RETRY RESET.", abs_state);
3299 3299 goto retry_reset;
3300 3300 }
3301 3301
3302 3302 /* Mark HBA as bad if FW is faulted after 3 consecutive resets. */
3303 3303 if (mfi_state_transition_to_ready(instance) ||
3304 3304 debug_tbolt_fw_faults_after_ocr_g == 1) {
3305 3305 cur_abs_reg_val =
3306 3306 instance->func_ptr->read_fw_status_reg(instance);
3307 3307 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3308 3308
3309 3309 con_log(CL_ANN1, (CE_NOTE,
3310 3310 "mrsas_tbolt_reset_ppc :before fake: FW is not ready "
3311 3311 "FW state = 0x%x", fw_state));
3312 3312 if (debug_tbolt_fw_faults_after_ocr_g == 1)
3313 3313 fw_state = MFI_STATE_FAULT;
3314 3314
3315 3315 con_log(CL_ANN,
3316 3316 (CE_NOTE, "mrsas_tbolt_reset_ppc : FW is not ready "
3317 3317 "FW state = 0x%x", fw_state));
3318 3318
3319 3319 if (fw_state == MFI_STATE_FAULT) {
3320 3320 /* increment the count */
3321 3321 instance->fw_fault_count_after_ocr++;
3322 3322 if (instance->fw_fault_count_after_ocr
3323 3323 < MAX_FW_RESET_COUNT) {
3324 3324 dev_err(instance->dip, CE_WARN,
3325 3325 "mrsas_tbolt_reset_ppc: "
3326 3326 "FW is in fault after OCR count %d "
3327 3327 "Retry Reset",
3328 3328 instance->fw_fault_count_after_ocr);
3329 3329 goto retry_reset;
3330 3330
3331 3331 } else {
3332 3332 dev_err(instance->dip, CE_WARN, "%s:"
3333 3333 "Max Reset Count exceeded >%d"
3334 3334 "Mark HBA as bad, KILL adapter",
3335 3335 __func__, MAX_FW_RESET_COUNT);
3336 3336
3337 3337 mrsas_tbolt_kill_adapter(instance);
3338 3338 return (DDI_FAILURE);
3339 3339 }
3340 3340 }
3341 3341 }
3342 3342
3343 3343 /* reset the counter as FW is up after OCR */
3344 3344 instance->fw_fault_count_after_ocr = 0;
3345 3345
3346 3346 mrsas_reset_reply_desc(instance);
3347 3347
3348 3348
3349 3349 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3350 3350 "Calling mrsas_issue_init_mpi2"));
3351 3351 abs_state = mrsas_issue_init_mpi2(instance);
3352 3352 if (abs_state == (uint32_t)DDI_FAILURE) {
3353 3353 dev_err(instance->dip, CE_WARN, "mrsas_tbolt_reset_ppc: "
3354 3354 "INIT failed Retrying Reset");
3355 3355 goto retry_reset;
3356 3356 }
3357 3357 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3358 3358 "mrsas_issue_init_mpi2 Done"));
3359 3359
3360 3360 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3361 3361 "Calling mrsas_print_pending_cmd"));
3362 3362 (void) mrsas_print_pending_cmds(instance);
3363 3363 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3364 3364 "mrsas_print_pending_cmd done"));
3365 3365
3366 3366 instance->func_ptr->enable_intr(instance);
3367 3367 instance->fw_outstanding = 0;
3368 3368
3369 3369 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3370 3370 "Calling mrsas_issue_pending_cmds"));
3371 3371 (void) mrsas_issue_pending_cmds(instance);
3372 3372 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3373 3373 "issue_pending_cmds done."));
3374 3374
3375 3375 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3376 3376 "Calling aen registration"));
3377 3377
3378 3378 instance->aen_cmd->retry_count_for_ocr = 0;
3379 3379 instance->aen_cmd->drv_pkt_time = 0;
3380 3380
3381 3381 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3382 3382
3383 3383 con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag."));
3384 3384 mutex_enter(&instance->ocr_flags_mtx);
3385 3385 instance->adapterresetinprogress = 0;
3386 3386 mutex_exit(&instance->ocr_flags_mtx);
3387 3387 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3388 3388 "adpterresetinprogress flag unset"));
3389 3389
3390 3390 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
3391 3391 return (DDI_SUCCESS);
3392 3392
3393 3393 }
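
In outline, the OCR sequence above is: disable interrupts and wait for in-flight commands; unlock the diag register with the magic write sequence; set DIAG_TBOLT_RESET_ADAPTER and poll until firmware clears it; wait for the firmware state to pass MFI_STATE_FW_INIT and transition to ready; re-initialize the MPI2 reply descriptors and queues; then re-enable interrupts, reissue pending commands, and re-arm the AEN command. A firmware fault at any point either retries the whole reset or, after MAX_FW_RESET_COUNT consecutive faults, kills the adapter.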
3394 3394
3395 3395
3396 3396 /*
3397 3397  * mrsas_tbolt_sync_map_info - Sync the driver's LD map with the FW
3398 3398  * @instance: Adapter soft state
3399 3399  *
3400 3400  * Issues an internal command (DCMD) that reports the driver's view of
3401 3401  * each logical drive's target id and sequence number back to the FW,
3402 3402  * so that driver and firmware agree on the current RAID map.
3403 3403 */
3404 3404
3405 3405 static int
3406 3406 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3407 3407 {
3408 3408 int ret = 0, i;
3409 3409 struct mrsas_cmd *cmd = NULL;
3410 3410 struct mrsas_dcmd_frame *dcmd;
3411 3411 uint32_t size_sync_info, num_lds;
3412 3412 LD_TARGET_SYNC *ci = NULL;
3413 3413 MR_FW_RAID_MAP_ALL *map;
3414 3414 MR_LD_RAID *raid;
3415 3415 LD_TARGET_SYNC *ld_sync;
3416 3416 uint32_t ci_h = 0;
3417 3417 uint32_t size_map_info;
3418 3418
3419 3419 cmd = get_raid_msg_pkt(instance);
3420 3420
3421 3421 if (cmd == NULL) {
3422 3422 dev_err(instance->dip, CE_WARN,
3423 3423 "Failed to get a cmd from free-pool in "
3424 3424 "mrsas_tbolt_sync_map_info().");
3425 3425 return (DDI_FAILURE);
3426 3426 }
3427 3427
3428 3428 /* Clear the frame buffer and assign back the context id */
3429 3429 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3430 3430 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3431 3431 cmd->index);
3432 3432 bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3433 3433
3434 3434
3435 3435 map = instance->ld_map[instance->map_id & 1];
3436 3436
3437 3437 num_lds = map->raidMap.ldCount;
3438 3438
3439 3439 dcmd = &cmd->frame->dcmd;
3440 3440
3441 3441 size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3442 3442
3443 3443 con_log(CL_ANN, (CE_NOTE, "size_sync_info =0x%x ; ld count = 0x%x",
3444 3444 size_sync_info, num_lds));
3445 3445
3446 3446 ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3447 3447
3448 3448 bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
3449 3449 ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3450 3450
3451 3451 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3452 3452
3453 3453 ld_sync = (LD_TARGET_SYNC *)ci;
3454 3454
3455 3455 for (i = 0; i < num_lds; i++, ld_sync++) {
3456 3456 raid = MR_LdRaidGet(i, map);
3457 3457
3458 3458 con_log(CL_ANN1,
3459 3459 (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
3460 3460 i, raid->seqNum, raid->flags.ldSyncRequired));
3461 3461
3462 3462 ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3463 3463
3464 3464 con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
3465 3465 i, ld_sync->ldTargetId));
3466 3466
3467 3467 ld_sync->seqNum = raid->seqNum;
3468 3468 }
3469 3469
3470 3470
3471 3471 size_map_info = sizeof (MR_FW_RAID_MAP) +
3472 3472 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3473 3473
3474 3474 dcmd->cmd = MFI_CMD_OP_DCMD;
3475 3475 dcmd->cmd_status = 0xFF;
3476 3476 dcmd->sge_count = 1;
3477 3477 dcmd->flags = MFI_FRAME_DIR_WRITE;
3478 3478 dcmd->timeout = 0;
3479 3479 dcmd->pad_0 = 0;
3480 3480 dcmd->data_xfer_len = size_map_info;
3481 3481 ASSERT(num_lds <= 255);
3482 3482 dcmd->mbox.b[0] = (U8)num_lds;
3483 3483 dcmd->mbox.b[1] = 1; /* Pend */
3484 3484 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3485 3485 dcmd->sgl.sge32[0].phys_addr = ci_h;
3486 3486 dcmd->sgl.sge32[0].length = size_map_info;
3487 3487
3488 3488
3489 3489 instance->map_update_cmd = cmd;
3490 3490 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3491 3491
3492 3492 instance->func_ptr->issue_cmd(cmd, instance);
3493 3493
3494 3494 instance->unroll.syncCmd = 1;
3495 3495 con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3496 3496
3497 3497 return (ret);
3498 3498 }
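
Note that the sync command is fire-and-forget: ret is still 0 (DDI_SUCCESS) on return, the command is parked in instance->map_update_cmd, and mbox.b[1] = 1 asks the firmware to pend the DCMD until the map actually changes, at which point the normal completion path collects it.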
3499 3499
3500 3500 /*
3501 3501 * abort_syncmap_cmd
3502 3502 */
3503 3503 int
3504 3504 abort_syncmap_cmd(struct mrsas_instance *instance,
3505 3505 struct mrsas_cmd *cmd_to_abort)
3506 3506 {
3507 3507 int ret = 0;
3508 3508
3509 3509 struct mrsas_cmd *cmd;
3510 3510 struct mrsas_abort_frame *abort_fr;
3511 3511
3512 3512 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3513 3513
3514 3514 cmd = get_raid_msg_mfi_pkt(instance);
3515 3515
3516 3516 if (!cmd) {
3517 3517 dev_err(instance->dip, CE_WARN,
3518 3518 "Failed to get a cmd from free-pool abort_syncmap_cmd().");
3519 3519 return (DDI_FAILURE);
3520 3520 }
3521 3521 /* Clear the frame buffer and assign back the context id */
3522 3522 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3523 3523 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3524 3524 cmd->index);
3525 3525
3526 3526 abort_fr = &cmd->frame->abort;
3527 3527
3528 3528 /* prepare and issue the abort frame */
3529 3529 ddi_put8(cmd->frame_dma_obj.acc_handle,
3530 3530 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3531 3531 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3532 3532 MFI_CMD_STATUS_SYNC_MODE);
3533 3533 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3534 3534 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3535 3535 cmd_to_abort->index);
3536 3536 ddi_put32(cmd->frame_dma_obj.acc_handle,
3537 3537 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3538 3538 ddi_put32(cmd->frame_dma_obj.acc_handle,
3539 3539 &abort_fr->abort_mfi_phys_addr_hi, 0);
3540 3540
3541 3541 cmd->frame_count = 1;
3542 3542
3543 3543 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3544 3544
3545 3545 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3546 3546 con_log(CL_ANN1, (CE_WARN,
3547 3547 "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3548 3548 ret = -1;
3549 3549 } else {
3550 3550 ret = 0;
3551 3551 }
3552 3552
3553 3553 return_raid_msg_mfi_pkt(instance, cmd);
3554 3554
3555 3555 atomic_add_16(&instance->fw_outstanding, (-1));
3556 3556
3557 3557 return (ret);
3558 3558 }
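
As a usage sketch (hypothetical call site; the real callers sit in the reset and teardown paths), the pending sync command parked earlier would be cancelled with:

	if (instance->map_update_cmd != NULL)
		(void) abort_syncmap_cmd(instance, instance->map_update_cmd);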
3559 3559
3560 3560
3561 3561 #ifdef PDSUPPORT
3562 3562 /*
3563 3563 * Even though these functions were originally intended for 2208 only, it
3564 3564 * turns out they're useful for "Skinny" support as well. In a perfect world,
3565 3565 * these two functions would be either in mr_sas.c, or in their own new source
3566 3566 * file. Since this driver needs some cleanup anyway, keep this portion in
3567 3567 * mind as well.
3568 3568 */
3569 3569
3570 3570 int
3571 3571 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3572 3572 uint8_t lun, dev_info_t **ldip)
3573 3573 {
3574 3574 struct scsi_device *sd;
3575 3575 dev_info_t *child;
3576 3576 int rval, dtype;
3577 3577 struct mrsas_tbolt_pd_info *pds = NULL;
3578 3578
3579 3579 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3580 3580 tgt, lun));
3581 3581
3582 3582 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3583 3583 if (ldip) {
3584 3584 *ldip = child;
3585 3585 }
3586 3586 if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3587 3587 rval = mrsas_service_evt(instance, tgt, 1,
3588 3588 MRSAS_EVT_UNCONFIG_TGT, NULL);
3589 3589 con_log(CL_ANN1, (CE_WARN,
3590 3590 "mr_sas:DELETING STALE ENTRY rval = %d "
3591 3591 "tgt id = %d", rval, tgt));
3592 3592 return (NDI_FAILURE);
3593 3593 }
3594 3594 return (NDI_SUCCESS);
3595 3595 }
3596 3596
3597 3597 pds = (struct mrsas_tbolt_pd_info *)
3598 3598 kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3599 3599 mrsas_tbolt_get_pd_info(instance, pds, tgt);
3600 3600 dtype = pds->scsiDevType;
3601 3601
3602 3602 /* Check for Disk */
3603 3603 if (dtype == DTYPE_DIRECT) {
3604 3604 /* Only PDs the FW exposes in SYSTEM state may be configured. */
3605 3605 if (LE_16(pds->fwState) != PD_SYSTEM) {
3606 3606 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3607 3607 return (NDI_FAILURE);
3608 3608 }
3609 3609 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3610 3610 sd->sd_address.a_hba_tran = instance->tran;
3611 3611 sd->sd_address.a_target = (uint16_t)tgt;
3612 3612 sd->sd_address.a_lun = (uint8_t)lun;
3613 3613
3614 3614 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3615 3615 rval = mrsas_config_scsi_device(instance, sd, ldip);
3616 3616 dev_err(instance->dip, CE_CONT,
3617 3617 "?Phys. device found: tgt %d dtype %d: %s\n",
3618 3618 tgt, dtype, sd->sd_inq->inq_vid);
3619 3619 } else {
3620 3620 rval = NDI_FAILURE;
3621 3621 con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3622 3622 "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3623 3623 tgt, dtype, sd->sd_inq->inq_vid));
3624 3624 }
3625 3625
3626 3626 /* sd_unprobe is a no-op; free the inquiry buffer manually. */
3627 3627 if (sd->sd_inq) {
3628 3628 kmem_free(sd->sd_inq, SUN_INQSIZE);
3629 3629 sd->sd_inq = (struct scsi_inquiry *)NULL;
3630 3630 }
3631 3631 kmem_free(sd, sizeof (struct scsi_device));
3632 3632 } else {
3633 3633 con_log(CL_ANN1, (CE_NOTE,
3634 3634 "?Device not supported: tgt %d lun %d dtype %d",
3635 3635 tgt, lun, dtype));
3636 3636 rval = NDI_FAILURE;
3637 3637 }
3638 3638
3639 3639 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3640 3640 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
3641 3641 rval));
3642 3642 return (rval);
3643 3643 }
3644 3644
3645 3645 static void
3646 3646 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3647 3647 struct mrsas_tbolt_pd_info *pds, int tgt)
3648 3648 {
3649 3649 struct mrsas_cmd *cmd;
3650 3650 struct mrsas_dcmd_frame *dcmd;
3651 3651 dma_obj_t dcmd_dma_obj;
3652 3652
3653 3653 ASSERT(instance->tbolt || instance->skinny);
3654 3654
3655 3655 if (instance->tbolt)
3656 3656 cmd = get_raid_msg_pkt(instance);
3657 3657 else
3658 3658 cmd = mrsas_get_mfi_pkt(instance);
3659 3659
3660 3660 if (!cmd) {
3661 3661 con_log(CL_ANN1,
3662 3662 (CE_WARN, "Failed to get a cmd for get pd info"));
3663 3663 return;
3664 3664 }
3665 3665
3666 3666 /* Clear the frame buffer and assign back the context id */
3667 3667 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3668 3668 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3669 3669 cmd->index);
3670 3670
3671 3671
3672 3672 dcmd = &cmd->frame->dcmd;
3673 3673 dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3674 3674 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3675 3675 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3676 3676 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3677 3677 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3678 3678 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3679 3679
3680 3680 (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3681 3681 DDI_STRUCTURE_LE_ACC);
3682 3682 bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
3683 3683 bzero(dcmd->mbox.b, 12);
3684 3684 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3685 3685 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3686 3686 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3687 3687 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3688 3688 MFI_FRAME_DIR_READ);
3689 3689 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3690 3690 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3691 3691 sizeof (struct mrsas_tbolt_pd_info));
3692 3692 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3693 3693 MR_DCMD_PD_GET_INFO);
3694 3694 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3695 3695 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3696 3696 sizeof (struct mrsas_tbolt_pd_info));
3697 3697 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3698 3698 dcmd_dma_obj.dma_cookie[0].dmac_address);
3699 3699
3700 3700 cmd->sync_cmd = MRSAS_TRUE;
3701 3701 cmd->frame_count = 1;
3702 3702
3703 3703 if (instance->tbolt)
3704 3704 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3705 3705
3706 3706 instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3707 3707
3708 3708 ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3709 3709 (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3710 3710 DDI_DEV_AUTOINCR);
3711 3711 (void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3712 3712
3713 3713 if (instance->tbolt)
3714 3714 return_raid_msg_pkt(instance, cmd);
3715 3715 else
3716 3716 mrsas_return_mfi_pkt(instance, cmd);
3717 3717 }
3718 3718 #endif
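
mrsas_tbolt_get_pd_info() follows the driver's standard synchronous DCMD read idiom; condensed (names as used above):

/*
 * 1. Allocate a DMA object sized for the response payload.
 * 2. Build the DCMD frame through the frame's access handle
 *    (ddi_put8/16/32): MFI_FRAME_DIR_READ plus one SGE pointing at
 *    the DMA buffer.
 * 3. Issue in sync mode and wait for completion.
 * 4. Copy the payload out with ddi_rep_get8() so the access handle
 *    mediates byte order.
 * 5. Free the DMA object and return the command to its pool.
 */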
[... 2642 lines elided ...]