Print this page
XXXX introduce drv_sectohz
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/sun/io/dada/targets/dad.c
+++ new/usr/src/uts/sun/io/dada/targets/dad.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26
27 27 /*
28 28 * Direct Attached disk driver for SPARC machines.
29 29 */
30 30
31 31 /*
32 32 * Includes, Declarations and Local Data
33 33 */
34 34 #include <sys/dada/dada.h>
35 35 #include <sys/dkbad.h>
36 36 #include <sys/dklabel.h>
37 37 #include <sys/dkio.h>
38 38 #include <sys/cdio.h>
39 39 #include <sys/vtoc.h>
40 40 #include <sys/dada/targets/daddef.h>
41 41 #include <sys/dada/targets/dadpriv.h>
42 42 #include <sys/file.h>
43 43 #include <sys/stat.h>
44 44 #include <sys/kstat.h>
45 45 #include <sys/vtrace.h>
46 46 #include <sys/aio_req.h>
47 47 #include <sys/note.h>
48 48 #include <sys/cmlb.h>
49 49
50 50 /*
51 51 * Global Error Levels for Error Reporting
52 52 */
53 53 int dcd_error_level = DCD_ERR_RETRYABLE;
54 54 /*
55 55 * Local Static Data
56 56 */
57 57
58 58 static int dcd_io_time = DCD_IO_TIME;
59 59 static int dcd_retry_count = DCD_RETRY_COUNT;
60 60 #ifndef lint
61 61 static int dcd_report_pfa = 1;
62 62 #endif
63 63 static int dcd_rot_delay = 4;
64 64 static int dcd_poll_busycnt = DCD_POLL_TIMEOUT;
65 65
66 66 /*
67 67 * Local Function Prototypes
68 68 */
69 69
70 70 static int dcdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
71 71 static int dcdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
72 72 static int dcdstrategy(struct buf *bp);
73 73 static int dcddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
74 74 static int dcdioctl(dev_t, int, intptr_t, int, cred_t *, int *);
75 75 static int dcdread(dev_t dev, struct uio *uio, cred_t *cred_p);
76 76 static int dcdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
77 77 static int dcd_prop_op(dev_t, dev_info_t *, ddi_prop_op_t, int,
78 78 char *, caddr_t, int *);
79 79 static int dcdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
80 80 static int dcdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);
81 81
82 82
83 83 static void dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi);
84 84 static int dcd_doattach(dev_info_t *devi, int (*f)());
85 85 static int dcd_validate_geometry(struct dcd_disk *un);
86 86 static ddi_devid_t dcd_get_devid(struct dcd_disk *un);
87 87 static ddi_devid_t dcd_create_devid(struct dcd_disk *un);
88 88 static int dcd_make_devid_from_serial(struct dcd_disk *un);
89 89 static void dcd_validate_model_serial(char *str, int *retlen, int totallen);
90 90 static int dcd_read_deviceid(struct dcd_disk *un);
91 91 static int dcd_write_deviceid(struct dcd_disk *un);
92 92 static int dcd_poll(struct dcd_pkt *pkt);
93 93 static char *dcd_rname(int reason);
94 94 static void dcd_flush_cache(struct dcd_disk *un);
95 95
96 96 static int dcd_compute_dk_capacity(struct dcd_device *devp,
97 97 diskaddr_t *capacity);
98 98 static int dcd_send_lb_rw_cmd(dev_info_t *devinfo, void *bufaddr,
99 99 diskaddr_t start_block, size_t reqlength, uchar_t cmd);
100 100
101 101 static void dcdmin(struct buf *bp);
102 102
103 103 static int dcdioctl_cmd(dev_t, struct udcd_cmd *,
104 104 enum uio_seg, enum uio_seg);
105 105
106 106 static void dcdstart(struct dcd_disk *un);
107 107 static void dcddone_and_mutex_exit(struct dcd_disk *un, struct buf *bp);
108 108 static void make_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*f)());
109 109 static void dcdudcdmin(struct buf *bp);
110 110
111 111 static int dcdrunout(caddr_t);
112 112 static int dcd_check_wp(dev_t dev);
113 113 static int dcd_unit_ready(dev_t dev);
114 114 static void dcd_handle_tran_busy(struct buf *bp, struct diskhd *dp,
115 115 struct dcd_disk *un);
116 116 static void dcdintr(struct dcd_pkt *pkt);
117 117 static int dcd_handle_incomplete(struct dcd_disk *un, struct buf *bp);
118 118 static void dcd_offline(struct dcd_disk *un, int bechatty);
119 119 static int dcd_ready_and_valid(dev_t dev, struct dcd_disk *un);
120 120 static void dcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt);
121 121 static void dcd_translate(struct dadkio_status32 *statp, struct udcd_cmd *cmdp);
122 122 static int dcdflushdone(struct buf *bp);
123 123
124 124 /* Function prototypes for cmlb */
125 125
126 126 static int dcd_lb_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
127 127 diskaddr_t start_block, size_t reqlength, void *tg_cookie);
128 128
129 129 static int dcd_lb_getphygeom(dev_info_t *devi, cmlb_geom_t *phygeomp);
130 130 static int dcd_lb_getinfo(dev_info_t *devi, int cmd, void *arg,
131 131 void *tg_cookie);
132 132
133 133
134 134 static cmlb_tg_ops_t dcd_lb_ops = {
135 135 TG_DK_OPS_VERSION_1,
136 136 dcd_lb_rdwr,
137 137 dcd_lb_getinfo
138 138 };
139 139
140 140 /*
141 141 * Error and Logging Functions
142 142 */
143 143 #ifndef lint
144 144 static void clean_print(dev_info_t *dev, char *label, uint_t level,
145 145 char *title, char *data, int len);
146 146 static void dcdrestart(void *arg);
147 147 #endif /* lint */
148 148
149 149 static int dcd_check_error(struct dcd_disk *un, struct buf *bp);
150 150
151 151 /*
152 152 * Error statistics create/update functions
153 153 */
154 154 static int dcd_create_errstats(struct dcd_disk *, int);
155 155
156 156
157 157
158 158 /*PRINTFLIKE4*/
159 159 extern void dcd_log(dev_info_t *, char *, uint_t, const char *, ...)
160 160 __KPRINTFLIKE(4);
161 161 extern void makecommand(struct dcd_pkt *, int, uchar_t, uint32_t,
162 162 uchar_t, uint32_t, uchar_t, uchar_t);
163 163
164 164
165 165 /*
166 166 * Configuration Routines
167 167 */
168 168 static int dcdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
169 169 void **result);
170 170 static int dcdprobe(dev_info_t *devi);
171 171 static int dcdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
172 172 static int dcddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
173 173 static int dcdreset(dev_info_t *dip, ddi_reset_cmd_t cmd);
174 174 static int dcd_dr_detach(dev_info_t *devi);
175 175 static int dcdpower(dev_info_t *devi, int component, int level);
176 176
177 177 static void *dcd_state;
178 178 static int dcd_max_instance;
179 179 static char *dcd_label = "dad";
180 180
181 181 static char *diskokay = "disk okay\n";
182 182
183 183 #if DEBUG || lint
184 184 #define DCDDEBUG
185 185 #endif
186 186
187 187 int dcd_test_flag = 0;
188 188 /*
189 189 * Debugging macros
190 190 */
191 191 #ifdef DCDDEBUG
192 192 static int dcddebug = 0;
193 193 #define DEBUGGING (dcddebug > 1)
194 194 #define DAD_DEBUG if (dcddebug == 1) dcd_log
195 195 #define DAD_DEBUG2 if (dcddebug > 1) dcd_log
196 196 #else /* DCDDEBUG */
197 197 #define dcddebug (0)
198 198 #define DEBUGGING (0)
199 199 #define DAD_DEBUG if (0) dcd_log
200 200 #define DAD_DEBUG2 if (0) dcd_log
201 201 #endif
202 202
203 203 /*
204 204 * we use pkt_private area for storing bp and retry_count
205 205  * XXX: Is this really useful?
206 206 */
207 207 struct dcd_pkt_private {
208 208 struct buf *dcdpp_bp;
209 209 short dcdpp_retry_count;
210 210 short dcdpp_victim_retry_count;
211 211 };
212 212
213 213
214 214 _NOTE(SCHEME_PROTECTS_DATA("Unique per pkt", dcd_pkt_private buf))
215 215
216 216 #define PP_LEN (sizeof (struct dcd_pkt_private))
217 217
218 218 #define PKT_SET_BP(pkt, bp) \
219 219 ((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp = bp
220 220 #define PKT_GET_BP(pkt) \
221 221 (((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp)
222 222
223 223
224 224 #define PKT_SET_RETRY_CNT(pkt, n) \
225 225 ((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count = n
226 226
227 227 #define PKT_GET_RETRY_CNT(pkt) \
228 228 (((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count)
229 229
230 230 #define PKT_INCR_RETRY_CNT(pkt, n) \
231 231 ((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count += n
232 232
233 233 #define PKT_SET_VICTIM_RETRY_CNT(pkt, n) \
234 234 ((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
235 235 = n
236 236
237 237 #define PKT_GET_VICTIM_RETRY_CNT(pkt) \
238 238 (((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count)
239 239 #define PKT_INCR_VICTIM_RETRY_CNT(pkt, n) \
240 240 ((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
241 241 += n
242 242
243 243 #define DISK_NOT_READY_RETRY_COUNT (dcd_retry_count / 2)
244 244
245 245
246 246 /*
247 247 * Urk!
248 248 */
249 249 #define SET_BP_ERROR(bp, err) \
250 250 bioerror(bp, err);
251 251
252 252 #define IOSP KSTAT_IO_PTR(un->un_stats)
253 253 #define IO_PARTITION_STATS un->un_pstats[DCDPART(bp->b_edev)]
254 254 #define IOSP_PARTITION KSTAT_IO_PTR(IO_PARTITION_STATS)
255 255
256 256 #define DCD_DO_KSTATS(un, kstat_function, bp) \
257 257 ASSERT(mutex_owned(DCD_MUTEX)); \
258 258 if (bp != un->un_sbufp) { \
259 259 if (un->un_stats) { \
260 260 kstat_function(IOSP); \
261 261 } \
262 262 if (IO_PARTITION_STATS) { \
263 263 kstat_function(IOSP_PARTITION); \
264 264 } \
265 265 }
266 266
267 267 #define DCD_DO_ERRSTATS(un, x) \
268 268 if (un->un_errstats) { \
269 269 struct dcd_errstats *dtp; \
270 270 dtp = (struct dcd_errstats *)un->un_errstats->ks_data; \
271 271 dtp->x.value.ui32++; \
272 272 }
273 273
274 274 #define GET_SOFT_STATE(dev) \
275 275 struct dcd_disk *un; \
276 276 int instance, part; \
277 277 minor_t minor = getminor(dev); \
278 278 \
279 279 part = minor & DCDPART_MASK; \
280 280 instance = minor >> DCDUNIT_SHIFT; \
281 281 if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL) \
282 282 return (ENXIO);
283 283
284 284 #define LOGICAL_BLOCK_ALIGN(blkno, blknoshift) \
285 285 (((blkno) & ((1 << (blknoshift)) - 1)) == 0)
286 286
287 287 /*
288 288 * After the following number of sectors, the cylinder number spills over
289 289 * 0xFFFF if sectors = 63 and heads = 16.
290 290 */
291 291 #define NUM_SECTORS_32G 0x3EFFC10
292 292
293 293 /*
294 294 * Configuration Data
295 295 */
296 296
297 297 /*
298 298 * Device driver ops vector
299 299 */
300 300
301 301 static struct cb_ops dcd_cb_ops = {
302 302 dcdopen, /* open */
303 303 dcdclose, /* close */
304 304 dcdstrategy, /* strategy */
305 305 nodev, /* print */
306 306 dcddump, /* dump */
307 307 dcdread, /* read */
308 308 dcdwrite, /* write */
309 309 dcdioctl, /* ioctl */
310 310 nodev, /* devmap */
311 311 nodev, /* mmap */
312 312 nodev, /* segmap */
313 313 nochpoll, /* poll */
314 314 dcd_prop_op, /* cb_prop_op */
315 315 0, /* streamtab */
316 316 D_64BIT | D_MP | D_NEW, /* Driver compatibility flag */
317 317 CB_REV, /* cb_rev */
318 318 dcdaread, /* async I/O read entry point */
319 319 dcdawrite /* async I/O write entry point */
320 320 };
321 321
322 322 static struct dev_ops dcd_ops = {
323 323 DEVO_REV, /* devo_rev, */
324 324 0, /* refcnt */
325 325 dcdinfo, /* info */
326 326 nulldev, /* identify */
327 327 dcdprobe, /* probe */
328 328 dcdattach, /* attach */
329 329 dcddetach, /* detach */
330 330 dcdreset, /* reset */
331 331 &dcd_cb_ops, /* driver operations */
332 332 (struct bus_ops *)0, /* bus operations */
333 333 dcdpower, /* power */
334 334 ddi_quiesce_not_supported, /* devo_quiesce */
335 335 };
336 336
337 337
338 338 /*
339 339 * This is the loadable module wrapper.
340 340 */
341 341 #include <sys/modctl.h>
342 342
343 343 static struct modldrv modldrv = {
344 344 &mod_driverops, /* Type of module. This one is a driver */
345 345 "DAD Disk Driver", /* Name of the module. */
346 346 &dcd_ops, /* driver ops */
347 347 };
348 348
349 349
350 350
351 351 static struct modlinkage modlinkage = {
352 352 MODREV_1, &modldrv, NULL
353 353 };
354 354
355 355 /*
356 356 * the dcd_attach_mutex only protects dcd_max_instance in multi-threaded
357 357 * attach situations
358 358 */
359 359 static kmutex_t dcd_attach_mutex;
360 360
361 361 int
362 362 _init(void)
363 363 {
364 364 int e;
365 365
366 366 if ((e = ddi_soft_state_init(&dcd_state, sizeof (struct dcd_disk),
367 367 DCD_MAXUNIT)) != 0)
368 368 return (e);
369 369
370 370 mutex_init(&dcd_attach_mutex, NULL, MUTEX_DRIVER, NULL);
371 371 e = mod_install(&modlinkage);
372 372 if (e != 0) {
373 373 mutex_destroy(&dcd_attach_mutex);
374 374 ddi_soft_state_fini(&dcd_state);
375 375 return (e);
376 376 }
377 377
378 378 return (e);
379 379 }
380 380
381 381 int
382 382 _fini(void)
383 383 {
384 384 int e;
385 385
386 386 if ((e = mod_remove(&modlinkage)) != 0)
387 387 return (e);
388 388
389 389 ddi_soft_state_fini(&dcd_state);
390 390 mutex_destroy(&dcd_attach_mutex);
391 391
392 392 return (e);
393 393 }
394 394
395 395 int
396 396 _info(struct modinfo *modinfop)
397 397 {
398 398
399 399 return (mod_info(&modlinkage, modinfop));
400 400 }
401 401
402 402 static int
403 403 dcdprobe(dev_info_t *devi)
404 404 {
405 405 struct dcd_device *devp;
406 406 int rval = DDI_PROBE_PARTIAL;
407 407 int instance;
408 408
409 409 devp = ddi_get_driver_private(devi);
410 410 instance = ddi_get_instance(devi);
411 411
412 412 /*
413 413 * Keep a count of how many disks (ie. highest instance no) we have
414 414 * XXX currently not used but maybe useful later again
415 415 */
416 416 mutex_enter(&dcd_attach_mutex);
417 417 if (instance > dcd_max_instance)
418 418 dcd_max_instance = instance;
419 419 mutex_exit(&dcd_attach_mutex);
420 420
421 421 DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG, "dcdprobe:\n");
422 422
423 423 if (ddi_get_soft_state(dcd_state, instance) != NULL)
424 424 return (DDI_PROBE_PARTIAL);
425 425
426 426 /*
427 427 * Turn around and call utility probe routine
428 428 * to see whether we actually have a disk at
429 429  * to see whether we actually have a disk attached.
430 430
431 431 DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
432 432 "dcdprobe: %x\n", dcd_probe(devp, NULL_FUNC));
433 433
434 434 switch (dcd_probe(devp, NULL_FUNC)) {
435 435 default:
436 436 case DCDPROBE_NORESP:
437 437 case DCDPROBE_NONCCS:
438 438 case DCDPROBE_NOMEM:
439 439 case DCDPROBE_FAILURE:
440 440 case DCDPROBE_BUSY:
441 441 break;
442 442
443 443 case DCDPROBE_EXISTS:
444 444 /*
445 445 * Check whether it is a ATA device and then
446 446 * return SUCCESS.
447 447 */
448 448 DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
449 449 "config %x\n", devp->dcd_ident->dcd_config);
450 450 if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
451 451 if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
452 452 rval = DDI_PROBE_SUCCESS;
453 453 } else
454 454 rval = DDI_PROBE_FAILURE;
455 455 } else {
456 456 rval = DDI_PROBE_FAILURE;
457 457 }
458 458 break;
459 459 }
460 460 dcd_unprobe(devp);
461 461
462 462 DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
463 463 "dcdprobe returns %x\n", rval);
464 464
465 465 return (rval);
466 466 }
467 467
468 468
469 469 /*ARGSUSED*/
470 470 static int
471 471 dcdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
472 472 {
473 473 int instance, rval;
474 474 struct dcd_device *devp;
475 475 struct dcd_disk *un;
476 476 struct diskhd *dp;
477 477 char *pm_comp[] =
478 478 { "NAME=ide-disk", "0=standby", "1=idle", "2=active" };
479 479
480 480 /* CONSTCOND */
481 481 ASSERT(NO_COMPETING_THREADS);
482 482
483 483
484 484 devp = ddi_get_driver_private(devi);
485 485 instance = ddi_get_instance(devi);
486 486 DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG, "Attach Started\n");
487 487
488 488 switch (cmd) {
489 489 case DDI_ATTACH:
490 490 break;
491 491
492 492 case DDI_RESUME:
493 493 if (!(un = ddi_get_soft_state(dcd_state, instance)))
494 494 return (DDI_FAILURE);
495 495 mutex_enter(DCD_MUTEX);
496 496 Restore_state(un);
497 497 /*
498 498 * Restore the state which was saved to give the
499 499 * the right state in un_last_state
500 500 */
501 501 un->un_last_state = un->un_save_state;
502 502 un->un_throttle = 2;
503 503 cv_broadcast(&un->un_suspend_cv);
504 504 /*
505 505 * Raise the power level of the device to active.
506 506 */
507 507 mutex_exit(DCD_MUTEX);
508 508 (void) pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE);
509 509 mutex_enter(DCD_MUTEX);
510 510
511 511 /*
512 512 * start unit - if this is a low-activity device
513 513 * commands in queue will have to wait until new
514 514 * commands come in, which may take awhile.
515 515 * Also, we specifically don't check un_ncmds
516 516 * because we know that there really are no
517 517 * commands in progress after the unit was suspended
518 518 * and we could have reached the throttle level, been
519 519 * suspended, and have no new commands coming in for
520 520 * awhile. Highly unlikely, but so is the low-
521 521 * activity disk scenario.
522 522 */
523 523 dp = &un->un_utab;
524 524 if (dp->b_actf && (dp->b_forw == NULL)) {
525 525 dcdstart(un);
526 526 }
527 527
528 528 mutex_exit(DCD_MUTEX);
529 529 return (DDI_SUCCESS);
530 530
531 531 default:
532 532 return (DDI_FAILURE);
533 533 }
534 534
535 535 if (dcd_doattach(devi, SLEEP_FUNC) == DDI_FAILURE) {
536 536 return (DDI_FAILURE);
537 537 }
538 538
539 539 if (!(un = (struct dcd_disk *)
540 540 ddi_get_soft_state(dcd_state, instance))) {
541 541 return (DDI_FAILURE);
542 542 }
543 543 devp->dcd_private = (ataopaque_t)un;
544 544
545 545 /*
546 546 * Add a zero-length attribute to tell the world we support
547 547 * kernel ioctls (for layered drivers)
548 548 */
549 549 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
550 550 DDI_KERNEL_IOCTL, NULL, 0);
551 551
552 552 /*
553 553 * Since the dad device does not have the 'reg' property,
554 554 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
555 555 * The following code is to tell cpr that this device
556 556 * does need to be suspended and resumed.
557 557 */
558 558 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
559 559 "pm-hardware-state", (caddr_t)"needs-suspend-resume");
560 560
561 561 /*
562 562 * Initialize power management bookkeeping;
563 563 * Create components - In IDE case there are 3 levels and one
564 564 * component. The levels being - active, idle, standby.
565 565 */
566 566
567 567 rval = ddi_prop_update_string_array(DDI_DEV_T_NONE,
568 568 devi, "pm-components", pm_comp, 4);
569 569 if (rval == DDI_PROP_SUCCESS) {
570 570 /*
571 571 * Ignore the return value of pm_raise_power
572 572 * Even if we check the return values and
573 573 * remove the property created above, PM
574 574 * framework will not honour the change after
575 575 * first call to pm_raise_power. Hence, the
576 576 * removal of that property does not help if
577 577 * pm_raise_power fails.
578 578 */
579 579 (void) pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE);
580 580 }
581 581
582 582 ddi_report_dev(devi);
583 583
584 584 cmlb_alloc_handle(&un->un_dklbhandle);
585 585
586 586 if (cmlb_attach(devi,
587 587 &dcd_lb_ops,
588 588 0,
589 589 B_FALSE,
590 590 B_FALSE,
591 591 DDI_NT_BLOCK_CHAN,
592 592 CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8,
593 593 un->un_dklbhandle,
594 594 0) != 0) {
595 595 cmlb_free_handle(&un->un_dklbhandle);
596 596 dcd_free_softstate(un, devi);
597 597 return (DDI_FAILURE);
598 598 }
599 599
600 600 mutex_enter(DCD_MUTEX);
601 601 (void) dcd_validate_geometry(un);
602 602
603 603 /* Get devid; create a devid ONLY IF could not get ID */
604 604 if (dcd_get_devid(un) == NULL) {
605 605 /* Create the fab'd devid */
606 606 (void) dcd_create_devid(un);
607 607 }
608 608 mutex_exit(DCD_MUTEX);
609 609
610 610 return (DDI_SUCCESS);
611 611 }
612 612
613 613 static void
614 614 dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi)
615 615 {
616 616 struct dcd_device *devp;
617 617 int instance = ddi_get_instance(devi);
618 618
619 619 devp = ddi_get_driver_private(devi);
620 620
621 621 if (un) {
622 622 sema_destroy(&un->un_semoclose);
623 623 cv_destroy(&un->un_sbuf_cv);
624 624 cv_destroy(&un->un_state_cv);
625 625 cv_destroy(&un->un_disk_busy_cv);
626 626 cv_destroy(&un->un_suspend_cv);
627 627
628 628 /*
629 629 * Deallocate command packet resources.
630 630 */
631 631 if (un->un_sbufp)
632 632 freerbuf(un->un_sbufp);
633 633 if (un->un_dp) {
634 634 kmem_free((caddr_t)un->un_dp, sizeof (*un->un_dp));
635 635 }
636 636 /*
637 637 * Unregister the devid and free devid resources allocated
638 638 */
639 639 ddi_devid_unregister(DCD_DEVINFO);
640 640 if (un->un_devid) {
641 641 ddi_devid_free(un->un_devid);
642 642 un->un_devid = NULL;
643 643 }
644 644
645 645 /*
646 646 * Delete kstats. Kstats for non CD devices are deleted
647 647 * in dcdclose.
648 648 */
649 649 if (un->un_stats) {
650 650 kstat_delete(un->un_stats);
651 651 }
652 652
653 653 }
654 654
655 655 /*
656 656  * Cleanup dcd_device resources.
657 657 */
658 658 ddi_soft_state_free(dcd_state, instance);
659 659 devp->dcd_private = (ataopaque_t)0;
660 660 /* unprobe scsi device */
661 661 dcd_unprobe(devp);
662 662
663 663 /* Remove properties created during attach */
664 664 ddi_prop_remove_all(devi);
665 665 }
666 666
667 667 static int
668 668 dcddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
669 669 {
670 670 int instance;
671 671 struct dcd_disk *un;
672 672 clock_t wait_cmds_complete;
673 673 instance = ddi_get_instance(devi);
674 674
675 675 if (!(un = ddi_get_soft_state(dcd_state, instance)))
676 676 return (DDI_FAILURE);
677 677
678 678 switch (cmd) {
679 679 case DDI_DETACH:
680 680 return (dcd_dr_detach(devi));
681 681
682 682 case DDI_SUSPEND:
683 683 mutex_enter(DCD_MUTEX);
684 684 if (un->un_state == DCD_STATE_SUSPENDED) {
685 685 mutex_exit(DCD_MUTEX);
686 686 return (DDI_SUCCESS);
687 687 }
688 688 un->un_throttle = 0;
689 689 /*
690 690 * Save the last state first
691 691 */
692 692 un->un_save_state = un->un_last_state;
693 693
694 694 New_state(un, DCD_STATE_SUSPENDED);
695 695
↓ open down ↓ |
695 lines elided |
↑ open up ↑ |
696 696 /*
697 697 * wait till current operation completed. If we are
698 698 * in the resource wait state (with an intr outstanding)
699 699 * then we need to wait till the intr completes and
700 700 * starts the next cmd. We wait for
701 701 * DCD_WAIT_CMDS_COMPLETE seconds before failing the
702 702 * DDI_SUSPEND.
703 703 */
704 704 wait_cmds_complete = ddi_get_lbolt();
705 705 wait_cmds_complete +=
706 - DCD_WAIT_CMDS_COMPLETE * drv_usectohz(1000000);
706 + drv_sectohz(DCD_WAIT_CMDS_COMPLETE);
707 707
708 708 while (un->un_ncmds) {
709 709 if (cv_timedwait(&un->un_disk_busy_cv,
710 710 DCD_MUTEX, wait_cmds_complete) == -1) {
711 711 /*
712 712  * commands didn't finish in the
713 713 * specified time, fail the DDI_SUSPEND.
714 714 */
715 715 DAD_DEBUG2(DCD_DEVINFO, dcd_label,
716 716 DCD_DEBUG, "dcddetach: SUSPEND "
717 717 "failed due to outstanding cmds\n");
718 718 Restore_state(un);
719 719 mutex_exit(DCD_MUTEX);
720 720 return (DDI_FAILURE);
721 721 }
722 722 }
723 723 mutex_exit(DCD_MUTEX);
724 724 return (DDI_SUCCESS);
725 725 }
726 726 return (DDI_FAILURE);
727 727 }
728 728
729 729 /*
730 730 * The reset entry point gets invoked at the system shutdown time or through
731 731 * CPR code at system suspend.
732 732 * Will be flushing the cache and expect this to be last I/O operation to the
733 733 * disk before system reset/power off.
734 734 */
735 735 /*ARGSUSED*/
736 736 static int
737 737 dcdreset(dev_info_t *dip, ddi_reset_cmd_t cmd)
738 738 {
739 739 struct dcd_disk *un;
740 740 int instance;
741 741
742 742 instance = ddi_get_instance(dip);
743 743
744 744 if (!(un = ddi_get_soft_state(dcd_state, instance)))
745 745 return (DDI_FAILURE);
746 746
747 747 dcd_flush_cache(un);
748 748
749 749 return (DDI_SUCCESS);
750 750 }
751 751
752 752
753 753 static int
754 754 dcd_dr_detach(dev_info_t *devi)
755 755 {
756 756 struct dcd_device *devp;
757 757 struct dcd_disk *un;
758 758
759 759 /*
760 760  * Get the dcd_device structure for this instance.
761 761 */
762 762 if ((devp = ddi_get_driver_private(devi)) == NULL)
763 763 return (DDI_FAILURE);
764 764
765 765 /*
766 766 * Get dcd_disk structure containing target 'private' information
767 767 */
768 768 un = (struct dcd_disk *)devp->dcd_private;
769 769
770 770 /*
771 771 * Verify there are NO outstanding commands issued to this device.
772 772 * ie, un_ncmds == 0.
773 773 * It's possible to have outstanding commands through the physio
774 774 * code path, even though everything's closed.
775 775 */
776 776 #ifndef lint
777 777 _NOTE(COMPETING_THREADS_NOW);
778 778 #endif
779 779 mutex_enter(DCD_MUTEX);
780 780 if (un->un_ncmds) {
781 781 mutex_exit(DCD_MUTEX);
782 782 _NOTE(NO_COMPETING_THREADS_NOW);
783 783 return (DDI_FAILURE);
784 784 }
785 785
786 786 mutex_exit(DCD_MUTEX);
787 787
788 788 cmlb_detach(un->un_dklbhandle, 0);
789 789 cmlb_free_handle(&un->un_dklbhandle);
790 790
791 791
792 792 /*
793 793 * Lower the power state of the device
794 794 * i.e. the minimum power consumption state - sleep.
795 795 */
796 796 (void) pm_lower_power(DCD_DEVINFO, 0, DCD_DEVICE_STANDBY);
797 797
798 798 _NOTE(NO_COMPETING_THREADS_NOW);
799 799
800 800 /*
801 801 * at this point there are no competing threads anymore
802 802 * release active MT locks and all device resources.
803 803 */
804 804 dcd_free_softstate(un, devi);
805 805
806 806 return (DDI_SUCCESS);
807 807 }
808 808
809 809 static int
810 810 dcdpower(dev_info_t *devi, int component, int level)
811 811 {
812 812 struct dcd_pkt *pkt;
813 813 struct dcd_disk *un;
814 814 int instance;
815 815 uchar_t cmd;
816 816
817 817
818 818 instance = ddi_get_instance(devi);
819 819
820 820 if (!(un = ddi_get_soft_state(dcd_state, instance)) ||
821 821 (DCD_DEVICE_STANDBY > level) || (level > DCD_DEVICE_ACTIVE) ||
822 822 component != 0) {
823 823 return (DDI_FAILURE);
824 824 }
825 825
826 826 mutex_enter(DCD_MUTEX);
827 827 /*
828 828  * If there are active commands for the device, or the device
829 829  * will be active soon, and at the same time there is a request
830 830  * to lower power, return failure.
831 831 */
832 832 if ((un->un_ncmds) && (level != DCD_DEVICE_ACTIVE)) {
833 833 mutex_exit(DCD_MUTEX);
834 834 return (DDI_FAILURE);
835 835 }
836 836
837 837 if ((un->un_state == DCD_STATE_OFFLINE) ||
838 838 (un->un_state == DCD_STATE_FATAL)) {
839 839 mutex_exit(DCD_MUTEX);
840 840 return (DDI_FAILURE);
841 841 }
842 842
843 843 if (level == DCD_DEVICE_ACTIVE) {
844 844 /*
845 845 * No need to fire any command, just set the state structure
846 846 * to indicate previous state and set the level to active
847 847 */
848 848 un->un_power_level = DCD_DEVICE_ACTIVE;
849 849 if (un->un_state == DCD_STATE_PM_SUSPENDED)
850 850 Restore_state(un);
851 851 mutex_exit(DCD_MUTEX);
852 852 } else {
853 853 pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
854 854 NULL, (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
855 855 PKT_CONSISTENT, NULL_FUNC, NULL);
856 856
857 857 if (pkt == (struct dcd_pkt *)NULL) {
858 858 mutex_exit(DCD_MUTEX);
859 859 return (DDI_FAILURE);
860 860 }
861 861
862 862 switch (level) {
863 863 case DCD_DEVICE_IDLE:
864 864 cmd = ATA_IDLE_IMMEDIATE;
865 865 break;
866 866
867 867 case DCD_DEVICE_STANDBY:
868 868 cmd = ATA_STANDBY_IMMEDIATE;
869 869 break;
870 870 }
871 871
872 872 makecommand(pkt, 0, cmd, 0, 0, 0, NO_DATA_XFER, 0);
873 873 mutex_exit(DCD_MUTEX);
874 874 /*
875 875 * Issue the appropriate command
876 876 */
877 877 if ((dcd_poll(pkt)) || (SCBP_C(pkt) != STATUS_GOOD)) {
878 878 dcd_destroy_pkt(pkt);
879 879 return (DDI_FAILURE);
880 880 }
881 881 dcd_destroy_pkt(pkt);
882 882 mutex_enter(DCD_MUTEX);
883 883 if (un->un_state != DCD_STATE_PM_SUSPENDED)
884 884 New_state(un, DCD_STATE_PM_SUSPENDED);
885 885 un->un_power_level = level;
886 886 mutex_exit(DCD_MUTEX);
887 887 }
888 888
889 889 return (DDI_SUCCESS);
890 890 }
891 891
892 892 static int
893 893 dcd_doattach(dev_info_t *devi, int (*canwait)())
894 894 {
895 895 struct dcd_device *devp;
896 896 struct dcd_disk *un = (struct dcd_disk *)0;
897 897 int instance;
898 898 int km_flags = (canwait != NULL_FUNC)? KM_SLEEP : KM_NOSLEEP;
899 899 int rval;
900 900 char *prop_template = "target%x-dcd-options";
901 901 int options;
902 902 char prop_str[32];
903 903 int target;
904 904 diskaddr_t capacity;
905 905
906 906 devp = ddi_get_driver_private(devi);
907 907
908 908 /*
909 909  * Call the routine dcd_probe to do some of the dirty work.
910 910 * If the INQUIRY command succeeds, the field dcd_inq in the
911 911 * device structure will be filled in. The dcd_sense structure
912 912 * will also be allocated.
913 913 */
914 914
915 915 switch (dcd_probe(devp, canwait)) {
916 916 default:
917 917 return (DDI_FAILURE);
918 918
919 919 case DCDPROBE_EXISTS:
920 920 if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
921 921 if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
922 922 rval = DDI_SUCCESS;
923 923 } else {
924 924 rval = DDI_FAILURE;
925 925 goto error;
926 926 }
927 927 } else {
928 928 rval = DDI_FAILURE;
929 929 goto error;
930 930 }
931 931 }
932 932
933 933
934 934 instance = ddi_get_instance(devp->dcd_dev);
935 935
936 936 if (ddi_soft_state_zalloc(dcd_state, instance) != DDI_SUCCESS) {
937 937 rval = DDI_FAILURE;
938 938 goto error;
939 939 }
940 940
941 941 un = ddi_get_soft_state(dcd_state, instance);
942 942
943 943 un->un_sbufp = getrbuf(km_flags);
944 944 if (un->un_sbufp == (struct buf *)NULL) {
945 945 rval = DDI_FAILURE;
946 946 goto error;
947 947 }
948 948
949 949
950 950 un->un_dcd = devp;
951 951 un->un_power_level = -1;
952 952 un->un_tgattribute.media_is_writable = 1;
953 953
954 954 sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);
955 955 cv_init(&un->un_sbuf_cv, NULL, CV_DRIVER, NULL);
956 956 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
957 957 /* Initialize power management conditional variable */
958 958 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
959 959 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
960 960
961 961 if (un->un_dp == 0) {
962 962 /*
963 963 * Assume CCS drive, assume parity, but call
964 964 * it a CDROM if it is a RODIRECT device.
965 965 */
966 966 un->un_dp = (struct dcd_drivetype *)
967 967 kmem_zalloc(sizeof (struct dcd_drivetype), km_flags);
968 968 if (!un->un_dp) {
969 969 rval = DDI_FAILURE;
970 970 goto error;
971 971 }
972 972 if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
973 973 if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
974 974 un->un_dp->ctype = CTYPE_DISK;
975 975 }
976 976 } else {
977 977 rval = DDI_FAILURE;
978 978 goto error;
979 979 }
980 980 un->un_dp->name = "CCS";
981 981 un->un_dp->options = 0;
982 982 }
983 983
984 984 /*
985 985 * Allow I/O requests at un_secsize offset in multiple of un_secsize.
986 986 */
987 987 un->un_secsize = DEV_BSIZE;
988 988
989 989 /*
990 990  * If the device is not a removable media device, make sure the
991 991  * device is ready by issuing another identify (not strictly
992 992  * needed). Get the capacity from identify data and store it here.
993 993 */
994 994 if (dcd_compute_dk_capacity(devp, &capacity) == 0) {
995 995 un->un_diskcapacity = capacity;
996 996 un->un_lbasize = DEV_BSIZE;
997 997 }
998 998
999 999 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "Geometry Data\n");
1000 1000 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "cyls %x, heads %x",
1001 1001 devp->dcd_ident->dcd_fixcyls,
1002 1002 devp->dcd_ident->dcd_heads);
1003 1003 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "sectors %x,",
1004 1004 devp->dcd_ident->dcd_sectors);
1005 1005 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "capacity %llx\n",
1006 1006 capacity);
1007 1007
1008 1008 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1009 1009 "dcdprobe: drive selected\n");
1010 1010
1011 1011 /*
1012 1012 * Check for the property target<n>-dcd-options to find the option
1013 1013 * set by the HBA driver for this target so that we can set the
1014 1014 * Unit structure variable so that we can send commands accordingly.
1015 1015 */
1016 1016 target = devp->dcd_address->da_target;
1017 1017 (void) sprintf(prop_str, prop_template, target);
1018 1018 options = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_NOTPROM,
1019 1019 prop_str, -1);
1020 1020 if (options < 0) {
1021 1021 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1022 1022 "No per target properties");
1023 1023 } else {
1024 1024 if ((options & DCD_DMA_MODE) == DCD_DMA_MODE) {
1025 1025 un->un_dp->options |= DMA_SUPPORTTED;
1026 1026 un->un_dp->dma_mode = (options >> 3) & 0x03;
1027 1027 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1028 1028 "mode %x\n", un->un_dp->dma_mode);
1029 1029 } else {
1030 1030 un->un_dp->options &= ~DMA_SUPPORTTED;
1031 1031 un->un_dp->pio_mode = options & 0x7;
1032 1032 if (options & DCD_BLOCK_MODE)
1033 1033 un->un_dp->options |= BLOCK_MODE;
1034 1034 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1035 1035 "mode %x\n", un->un_dp->pio_mode);
1036 1036 }
1037 1037 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1038 1038 "options %x,", un->un_dp->options);
1039 1039 }
1040 1040
1041 1041 un->un_throttle = 2;
1042 1042 /*
1043 1043 * set default max_xfer_size - This should depend on whether the
1044 1044 * Block mode is supported by the device or not.
1045 1045 */
1046 1046 un->un_max_xfer_size = MAX_ATA_XFER_SIZE;
1047 1047
1048 1048 /*
1049 1049 * Set write cache enable softstate
1050 1050 *
1051 1051 * WCE is only supported in ATAPI-4 or higher; for
1052 1052 * lower rev devices, must assume write cache is
1053 1053 * enabled.
1054 1054 */
1055 1055 mutex_enter(DCD_MUTEX);
1056 1056 un->un_write_cache_enabled = (devp->dcd_ident->dcd_majvers == 0xffff) ||
1057 1057 ((devp->dcd_ident->dcd_majvers & IDENTIFY_80_ATAPI_4) == 0) ||
1058 1058 (devp->dcd_ident->dcd_features85 & IDENTIFY_85_WCE) != 0;
1059 1059 mutex_exit(DCD_MUTEX);
1060 1060
1061 1061 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1062 1062 "dcd_doattach returns good\n");
1063 1063
1064 1064 return (rval);
1065 1065
1066 1066 error:
1067 1067 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcd_doattach failed\n");
1068 1068 dcd_free_softstate(un, devi);
1069 1069 return (rval);
1070 1070 }
1071 1071
1072 1072 #ifdef NOTNEEDED
1073 1073 /*
1074 1074 * This routine is used to set the block mode of operation by issuing the
1075 1075 * Set Block mode ata command with the maximum block mode possible
1076 1076 */
1077 1077 dcd_set_multiple(struct dcd_disk *un)
1078 1078 {
1079 1079 int status;
1080 1080 struct udcd_cmd ucmd;
1081 1081 struct dcd_cmd cdb;
1082 1082 dev_t dev;
1083 1083
1084 1084
1085 1085 /* Zero all the required structure */
1086 1086 (void) bzero((caddr_t)&ucmd, sizeof (ucmd));
1087 1087
1088 1088 (void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));
1089 1089
1090 1090 cdb.cmd = ATA_SET_MULTIPLE;
1091 1091 /*
1092 1092 * Here we should pass what needs to go into sector count REGISTER.
1093 1093 * Eventhough this field indicates the number of bytes to read we
1094 1094 * need to specify the block factor in terms of bytes so that it
1095 1095 * will be programmed by the HBA driver into the sector count register.
1096 1096 */
1097 1097 cdb.size = un->un_lbasize * un->un_dp->block_factor;
1098 1098
1099 1099 cdb.sector_num.lba_num = 0;
1100 1100 cdb.address_mode = ADD_LBA_MODE;
1101 1101 cdb.direction = NO_DATA_XFER;
1102 1102
1103 1103 ucmd.udcd_flags = 0;
1104 1104 ucmd.udcd_cmd = &cdb;
1105 1105 ucmd.udcd_bufaddr = NULL;
1106 1106 ucmd.udcd_buflen = 0;
1107 1107 ucmd.udcd_flags |= UDCD_SILENT;
1108 1108
1109 1109 dev = makedevice(ddi_driver_major(DCD_DEVINFO),
1110 1110 ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
1111 1111
1112 1112
1113 1113 status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
1114 1114
1115 1115 return (status);
1116 1116 }
1117 1117 /*
1118 1118 * The following routine is used only for setting the transfer mode
1119 1119 * and it is not designed for transferring any other features subcommand.
1120 1120 */
1121 1121 dcd_set_features(struct dcd_disk *un, uchar_t mode)
1122 1122 {
1123 1123 int status;
1124 1124 struct udcd_cmd ucmd;
1125 1125 struct dcd_cmd cdb;
1126 1126 dev_t dev;
1127 1127
1128 1128
1129 1129 /* Zero all the required structure */
1130 1130 (void) bzero((caddr_t)&ucmd, sizeof (ucmd));
1131 1131
1132 1132 (void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));
1133 1133
1134 1134 cdb.cmd = ATA_SET_FEATURES;
1135 1135 /*
1136 1136 * Here we need to pass what needs to go into the sector count register
1137 1137 * But in the case of SET FEATURES command the value taken in the
1138 1138 * sector count register depends what type of subcommand is
1139 1139 * passed in the features register. Since we have defined the size to
1140 1140 * be the size in bytes in this context it does not indicate bytes
1141 1141 * instead it indicates the mode to be programmed.
1142 1142 */
1143 1143 cdb.size = un->un_lbasize * mode;
1144 1144
1145 1145 cdb.sector_num.lba_num = 0;
1146 1146 cdb.address_mode = ADD_LBA_MODE;
1147 1147 cdb.direction = NO_DATA_XFER;
1148 1148 cdb.features = ATA_FEATURE_SET_MODE;
1149 1149 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1150 1150 "size %x, features %x, cmd %x\n",
1151 1151 cdb.size, cdb.features, cdb.cmd);
1152 1152
1153 1153 ucmd.udcd_flags = 0;
1154 1154 ucmd.udcd_cmd = &cdb;
1155 1155 ucmd.udcd_bufaddr = NULL;
1156 1156 ucmd.udcd_buflen = 0;
1157 1157 ucmd.udcd_flags |= UDCD_SILENT;
1158 1158
1159 1159 dev = makedevice(ddi_driver_major(DCD_DEVINFO),
1160 1160 ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
1161 1161
1162 1162 status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
1163 1163
1164 1164 return (status);
1165 1165 }
1166 1166 #endif
1167 1167
1168 1168 /*
1169 1169 * Validate the geometry for this disk, e.g.,
1170 1170 * see whether it has a valid label.
1171 1171 */
1172 1172 static int
1173 1173 dcd_validate_geometry(struct dcd_disk *un)
1174 1174 {
1175 1175 int secsize = 0;
1176 1176 struct dcd_device *devp;
1177 1177 int secdiv;
1178 1178 int rval;
1179 1179
1180 1180 ASSERT(mutex_owned(DCD_MUTEX));
1181 1181 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1182 1182 "dcd_validate_geometry: started \n");
1183 1183
1184 1184 if (un->un_lbasize < 0) {
1185 1185 return (DCD_BAD_LABEL);
1186 1186 }
1187 1187
1188 1188 if (un->un_state == DCD_STATE_PM_SUSPENDED) {
1189 1189 mutex_exit(DCD_MUTEX);
1190 1190 if (pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE) !=
1191 1191 DDI_SUCCESS) {
1192 1192 mutex_enter(DCD_MUTEX);
1193 1193 return (DCD_BAD_LABEL);
1194 1194 }
1195 1195 mutex_enter(DCD_MUTEX);
1196 1196 }
1197 1197
1198 1198 secsize = un->un_secsize;
1199 1199
1200 1200 /*
1201 1201 * take a log base 2 of sector size (sorry)
1202 1202 */
1203 1203 for (secdiv = 0; secsize = secsize >> 1; secdiv++)
1204 1204 ;
1205 1205 un->un_secdiv = secdiv;
1206 1206
1207 1207 /*
1208 1208 * Only DIRECT ACCESS devices will have Sun labels.
1209 1209 * CD's supposedly have a Sun label, too
1210 1210 */
1211 1211
1212 1212 devp = un->un_dcd;
1213 1213
1214 1214 if (((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) &&
1215 1215 (devp->dcd_ident->dcd_config & ATANON_REMOVABLE)) {
1216 1216 mutex_exit(DCD_MUTEX);
1217 1217 rval = cmlb_validate(un->un_dklbhandle, 0, 0);
1218 1218 mutex_enter(DCD_MUTEX);
1219 1219 if (rval == ENOMEM)
1220 1220 return (DCD_NO_MEM_FOR_LABEL);
1221 1221 else if (rval != 0)
1222 1222 return (DCD_BAD_LABEL);
1223 1223 } else {
1224 1224 /* it should never get here. */
1225 1225 return (DCD_BAD_LABEL);
1226 1226 }
1227 1227
1228 1228 /*
1229 1229 * take a log base 2 of logical block size
1230 1230 */
1231 1231 secsize = un->un_lbasize;
1232 1232 for (secdiv = 0; secsize = secsize >> 1; secdiv++)
1233 1233 ;
1234 1234 un->un_lbadiv = secdiv;
1235 1235
1236 1236 /*
1237 1237 * take a log base 2 of the multiple of DEV_BSIZE blocks that
1238 1238 * make up one logical block
1239 1239 */
1240 1240 secsize = un->un_lbasize >> DEV_BSHIFT;
1241 1241 for (secdiv = 0; secsize = secsize >> 1; secdiv++)
1242 1242 ;
1243 1243 un->un_blknoshift = secdiv;
1244 1244 return (0);
1245 1245 }
1246 1246
1247 1247 /*
1248 1248 * Unix Entry Points
1249 1249 */
1250 1250
/*
 * dcdopen: open(9E) entry point.
 *
 * Serializes against close via un_semoclose, waits out DDI_SUSPEND,
 * powers the unit up for blocking opens, enforces exclusive-open
 * semantics, rejects writable opens of write-protected media, and
 * (unless opened FNDELAY/FNONBLOCK) verifies the unit is ready with a
 * usable partition.  The open is then recorded in un_ocmap, and disk
 * and partition kstats are created on the first open.
 *
 * Returns 0 on success or an errno value (EINVAL, ENXIO, EIO, EBUSY,
 * EROFS).
 */
/* ARGSUSED3 */
static int
dcdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
{
	dev_t dev = *dev_p;
	int rval = EIO;
	int partmask;
	int nodelay = (flag & (FNDELAY | FNONBLOCK));
	int i;
	char kstatname[KSTAT_STRLEN];
	diskaddr_t lblocks;
	char *partname;

	/* Resolves un/instance/part from dev; returns ENXIO if absent. */
	GET_SOFT_STATE(dev);

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Inside Open flag %x, otyp %x\n", flag, otyp);

	if (otyp >= OTYPCNT) {
		return (EINVAL);
	}

	partmask = 1 << part;

	/*
	 * We use a semaphore here in order to serialize
	 * open and close requests on the device.
	 */
	sema_p(&un->un_semoclose);

	mutex_enter(DCD_MUTEX);

	/* A unit in the fatal state can never be opened again. */
	if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL) {
		rval = ENXIO;
		goto done;
	}

	/* Block new opens while the driver is in DDI_SUSPEND. */
	while (un->un_state == DCD_STATE_SUSPENDED) {
		cv_wait(&un->un_suspend_cv, DCD_MUTEX);
	}

	/*
	 * Blocking opens of a PM-suspended unit raise the power level
	 * first; the mutex is dropped across the power transition.
	 */
	if ((un->un_state == DCD_STATE_PM_SUSPENDED) && (!nodelay)) {
		mutex_exit(DCD_MUTEX);
		if (pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE)
		    != DDI_SUCCESS) {
			mutex_enter(DCD_MUTEX);
			rval = EIO;
			goto done;
		}
		mutex_enter(DCD_MUTEX);
	}

	/*
	 * set make_dcd_cmd() flags and stat_size here since these
	 * are unlikely to change
	 */
	un->un_cmd_flags = 0;

	un->un_cmd_stat_size = 2;

	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdopen un=0x%p\n",
	    (void *)un);
	/*
	 * check for previous exclusive open
	 */
	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "exclopen=%x, flag=%x, regopen=%x\n",
	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Exclusive open flag %x, partmask %x\n",
	    un->un_exclopen, partmask);

	/* Fail if another stream holds this partition exclusively. */
	if (un->un_exclopen & (partmask)) {
failed_exclusive:
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "exclusive open fails\n");
		rval = EBUSY;
		goto done;
	}

	/*
	 * An exclusive open fails if the partition is already open via
	 * any layered open or any regular open type.
	 */
	if (flag & FEXCL) {
		int i;
		if (un->un_ocmap.lyropen[part]) {
			goto failed_exclusive;
		}
		for (i = 0; i < (OTYPCNT - 1); i++) {
			if (un->un_ocmap.regopen[i] & (partmask)) {
				goto failed_exclusive;
			}
		}
	}
	/* Writable opens of write-protected media are refused. */
	if (flag & FWRITE) {
		mutex_exit(DCD_MUTEX);
		if (dcd_check_wp(dev)) {
			sema_v(&un->un_semoclose);
			return (EROFS);
		}
		mutex_enter(DCD_MUTEX);
	}

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Check Write Protect handled\n");

	/*
	 * Blocking opens must see a ready unit with a non-empty
	 * partition; nodelay opens skip this validation entirely.
	 */
	if (!nodelay) {
		mutex_exit(DCD_MUTEX);
		if ((rval = dcd_ready_and_valid(dev, un)) != 0) {
			rval = EIO;
		}
		(void) pm_idle_component(DCD_DEVINFO, 0);
		/*
		 * Fail if device is not ready or if the number of disk
		 * blocks is zero or negative for non CD devices.
		 */
		if (rval || cmlb_partinfo(un->un_dklbhandle,
		    part, &lblocks, NULL, &partname, NULL, 0) ||
		    lblocks <= 0) {
			rval = EIO;
			mutex_enter(DCD_MUTEX);
			goto done;
		}
		mutex_enter(DCD_MUTEX);
	}

	/* Record the open in the per-unit open map. */
	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part]++;
	} else {
		un->un_ocmap.regopen[otyp] |= partmask;
	}

	/*
	 * set up open and exclusive open flags
	 */
	if (flag & FEXCL) {
		un->un_exclopen |= (partmask);
	}


	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "open of part %d type %d\n",
	    part, otyp);

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Kstats getting updated\n");
	/*
	 * only create kstats for disks, CD kstats created in dcdattach
	 */
	_NOTE(NO_COMPETING_THREADS_NOW);
	mutex_exit(DCD_MUTEX);
	if (un->un_stats == (kstat_t *)0) {
		un->un_stats = kstat_create("dad", instance,
		    NULL, "disk", KSTAT_TYPE_IO, 1,
		    KSTAT_FLAG_PERSISTENT);
		if (un->un_stats) {
			un->un_stats->ks_lock = DCD_MUTEX;
			kstat_install(un->un_stats);
		}

		/*
		 * set up partition statistics for each partition
		 * with number of blocks > 0
		 */
		if (!nodelay) {
			for (i = 0; i < NDKMAP; i++) {
				if ((un->un_pstats[i] == (kstat_t *)0) &&
				    (cmlb_partinfo(un->un_dklbhandle,
				    i, &lblocks, NULL, &partname,
				    NULL, 0) == 0) && lblocks > 0) {
					(void) sprintf(kstatname, "dad%d,%s",
					    instance, partname);
					un->un_pstats[i] = kstat_create("dad",
					    instance,
					    kstatname,
					    "partition",
					    KSTAT_TYPE_IO,
					    1,
					    KSTAT_FLAG_PERSISTENT);
					if (un->un_pstats[i]) {
						un->un_pstats[i]->ks_lock =
						    DCD_MUTEX;
						kstat_install(un->un_pstats[i]);
					}
				}
			}
		}
		/*
		 * set up error kstats
		 */
		(void) dcd_create_errstats(un, instance);
	}
#ifndef lint
	_NOTE(COMPETING_THREADS_NOW);
#endif

	sema_v(&un->un_semoclose);
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "Open success\n");
	return (0);

done:
	mutex_exit(DCD_MUTEX);
	sema_v(&un->un_semoclose);
	return (rval);

}
1454 1454
1455 1455 /*
1456 1456 * Test if disk is ready and has a valid geometry.
1457 1457 */
1458 1458 static int
1459 1459 dcd_ready_and_valid(dev_t dev, struct dcd_disk *un)
1460 1460 {
1461 1461 int rval = 1;
1462 1462 int g_error = 0;
1463 1463
1464 1464 mutex_enter(DCD_MUTEX);
1465 1465 /*
1466 1466 * cmds outstanding
1467 1467 */
1468 1468 if (un->un_ncmds == 0) {
1469 1469 (void) dcd_unit_ready(dev);
1470 1470 }
1471 1471
1472 1472 /*
1473 1473 * If device is not yet ready here, inform it is offline
1474 1474 */
1475 1475 if (un->un_state == DCD_STATE_NORMAL) {
1476 1476 rval = dcd_unit_ready(dev);
1477 1477 if (rval != 0 && rval != EACCES) {
1478 1478 dcd_offline(un, 1);
1479 1479 goto done;
1480 1480 }
1481 1481 }
1482 1482
1483 1483 if (un->un_format_in_progress == 0) {
1484 1484 g_error = dcd_validate_geometry(un);
1485 1485 }
1486 1486
1487 1487 /*
1488 1488 * check if geometry was valid. We don't check the validity of
1489 1489 * geometry for CDROMS.
1490 1490 */
1491 1491
1492 1492 if (g_error == DCD_BAD_LABEL) {
1493 1493 rval = 1;
1494 1494 goto done;
1495 1495 }
1496 1496
1497 1497
1498 1498 /*
1499 1499 * the state has changed; inform the media watch routines
1500 1500 */
1501 1501 un->un_mediastate = DKIO_INSERTED;
1502 1502 cv_broadcast(&un->un_state_cv);
1503 1503 rval = 0;
1504 1504
1505 1505 done:
1506 1506 mutex_exit(DCD_MUTEX);
1507 1507 return (rval);
1508 1508 }
1509 1509
1510 1510
/*
 * dcdclose: close(9E) entry point.
 *
 * Clears the exclusive-open and open-map state for this partition;
 * on the last close of the whole unit (open map fully clear) it
 * closes the cmlb label handle and tears down the disk, partition,
 * and error kstats.  Serialized against dcdopen() via un_semoclose.
 */
/*ARGSUSED*/
static int
dcdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
	uchar_t *cp;
	int i;

	/* Resolves un/instance/part from dev; returns ENXIO if absent. */
	GET_SOFT_STATE(dev);


	if (otyp >= OTYPCNT)
		return (ENXIO);

	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "close of part %d type %d\n",
	    part, otyp);
	sema_p(&un->un_semoclose);

	mutex_enter(DCD_MUTEX);

	/* Release any exclusive hold on this partition. */
	if (un->un_exclopen & (1<<part)) {
		un->un_exclopen &= ~(1<<part);
	}

	/* Remove this open from the open map. */
	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part] -= 1;
	} else {
		un->un_ocmap.regopen[otyp] &= ~(1<<part);
	}

	/*
	 * Scan the open map; if every byte is clear, this was the last
	 * close of the unit.
	 */
	cp = &un->un_ocmap.chkd[0];
	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
		if (*cp != (uchar_t)0) {
			break;
		}
		cp++;
	}

	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "last close\n");
		if (un->un_state == DCD_STATE_OFFLINE) {
			dcd_offline(un, 1);
		}

		/* cmlb_close() is called without the unit mutex held. */
		mutex_exit(DCD_MUTEX);
		(void) cmlb_close(un->un_dklbhandle, 0);

		/* Tear down all kstats created by dcdopen(). */
		_NOTE(NO_COMPETING_THREADS_NOW);
		if (un->un_stats) {
			kstat_delete(un->un_stats);
			un->un_stats = 0;
		}
		for (i = 0; i < NDKMAP; i++) {
			if (un->un_pstats[i]) {
				kstat_delete(un->un_pstats[i]);
				un->un_pstats[i] = (kstat_t *)0;
			}
		}

		if (un->un_errstats) {
			kstat_delete(un->un_errstats);
			un->un_errstats = (kstat_t *)0;
		}
		mutex_enter(DCD_MUTEX);

#ifndef lint
		_NOTE(COMPETING_THREADS_NOW);
#endif
	}

	mutex_exit(DCD_MUTEX);
	sema_v(&un->un_semoclose);
	return (0);
}
1585 1585
1586 1586 static void
1587 1587 dcd_offline(struct dcd_disk *un, int bechatty)
1588 1588 {
1589 1589 if (bechatty)
1590 1590 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, "offline\n");
1591 1591
1592 1592 mutex_exit(DCD_MUTEX);
1593 1593 cmlb_invalidate(un->un_dklbhandle, 0);
1594 1594 mutex_enter(DCD_MUTEX);
1595 1595 }
1596 1596
1597 1597 /*
1598 1598 * Given the device number return the devinfo pointer
1599 1599 * from the scsi_device structure.
1600 1600 */
1601 1601 /*ARGSUSED*/
1602 1602 static int
1603 1603 dcdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
1604 1604 {
1605 1605 dev_t dev;
1606 1606 struct dcd_disk *un;
1607 1607 int instance, error;
1608 1608
1609 1609
1610 1610 switch (infocmd) {
1611 1611 case DDI_INFO_DEVT2DEVINFO:
1612 1612 dev = (dev_t)arg;
1613 1613 instance = DCDUNIT(dev);
1614 1614 if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL)
1615 1615 return (DDI_FAILURE);
1616 1616 *result = (void *) DCD_DEVINFO;
1617 1617 error = DDI_SUCCESS;
1618 1618 break;
1619 1619 case DDI_INFO_DEVT2INSTANCE:
1620 1620 dev = (dev_t)arg;
1621 1621 instance = DCDUNIT(dev);
1622 1622 *result = (void *)(uintptr_t)instance;
1623 1623 error = DDI_SUCCESS;
1624 1624 break;
1625 1625 default:
1626 1626 error = DDI_FAILURE;
1627 1627 }
1628 1628 return (error);
1629 1629 }
1630 1630
1631 1631 /*
1632 1632 * property operation routine. return the number of blocks for the partition
1633 1633 * in question or forward the request to the propery facilities.
1634 1634 */
1635 1635 static int
1636 1636 dcd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1637 1637 char *name, caddr_t valuep, int *lengthp)
1638 1638 {
1639 1639 struct dcd_disk *un;
1640 1640
1641 1641 if ((un = ddi_get_soft_state(dcd_state, ddi_get_instance(dip))) == NULL)
1642 1642 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1643 1643 name, valuep, lengthp));
1644 1644
1645 1645 return (cmlb_prop_op(un->un_dklbhandle,
1646 1646 dev, dip, prop_op, mod_flags, name, valuep, lengthp,
1647 1647 DCDPART(dev), NULL));
1648 1648 }
1649 1649
1650 1650 /*
1651 1651 * These routines perform raw i/o operations.
1652 1652 */
/*ARGSUSED*/
void
dcduscsimin(struct buf *bp)
{
	/*
	 * Intentionally empty: pass-through requests are not clamped
	 * here (unlike dcdmin() for regular I/O).
	 */
}
1659 1659
1660 1660
1661 1661 static void
1662 1662 dcdmin(struct buf *bp)
1663 1663 {
1664 1664 struct dcd_disk *un;
1665 1665 int instance;
1666 1666 minor_t minor = getminor(bp->b_edev);
1667 1667 instance = minor >> DCDUNIT_SHIFT;
1668 1668 un = ddi_get_soft_state(dcd_state, instance);
1669 1669
1670 1670 if (bp->b_bcount > un->un_max_xfer_size)
1671 1671 bp->b_bcount = un->un_max_xfer_size;
1672 1672 }
1673 1673
1674 1674
1675 1675 /* ARGSUSED2 */
1676 1676 static int
1677 1677 dcdread(dev_t dev, struct uio *uio, cred_t *cred_p)
1678 1678 {
1679 1679 int secmask;
1680 1680 GET_SOFT_STATE(dev);
1681 1681 #ifdef lint
1682 1682 part = part;
1683 1683 #endif /* lint */
1684 1684 secmask = un->un_secsize - 1;
1685 1685
1686 1686 if (uio->uio_loffset & ((offset_t)(secmask))) {
1687 1687 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1688 1688 "file offset not modulo %d\n",
1689 1689 un->un_secsize);
1690 1690 return (EINVAL);
1691 1691 } else if (uio->uio_iov->iov_len & (secmask)) {
1692 1692 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1693 1693 "transfer length not modulo %d\n", un->un_secsize);
1694 1694 return (EINVAL);
1695 1695 }
1696 1696 return (physio(dcdstrategy, (struct buf *)0, dev, B_READ, dcdmin, uio));
1697 1697 }
1698 1698
1699 1699 /* ARGSUSED2 */
1700 1700 static int
1701 1701 dcdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
1702 1702 {
1703 1703 int secmask;
1704 1704 struct uio *uio = aio->aio_uio;
1705 1705 GET_SOFT_STATE(dev);
1706 1706 #ifdef lint
1707 1707 part = part;
1708 1708 #endif /* lint */
1709 1709 secmask = un->un_secsize - 1;
1710 1710
1711 1711 if (uio->uio_loffset & ((offset_t)(secmask))) {
1712 1712 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1713 1713 "file offset not modulo %d\n",
1714 1714 un->un_secsize);
1715 1715 return (EINVAL);
1716 1716 } else if (uio->uio_iov->iov_len & (secmask)) {
1717 1717 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1718 1718 "transfer length not modulo %d\n", un->un_secsize);
1719 1719 return (EINVAL);
1720 1720 }
1721 1721 return (aphysio(dcdstrategy, anocancel, dev, B_READ, dcdmin, aio));
1722 1722 }
1723 1723
1724 1724 /* ARGSUSED2 */
1725 1725 static int
1726 1726 dcdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
1727 1727 {
1728 1728 int secmask;
1729 1729 GET_SOFT_STATE(dev);
1730 1730 #ifdef lint
1731 1731 part = part;
1732 1732 #endif /* lint */
1733 1733 secmask = un->un_secsize - 1;
1734 1734
1735 1735 if (uio->uio_loffset & ((offset_t)(secmask))) {
1736 1736 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1737 1737 "file offset not modulo %d\n",
1738 1738 un->un_secsize);
1739 1739 return (EINVAL);
1740 1740 } else if (uio->uio_iov->iov_len & (secmask)) {
1741 1741 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1742 1742 "transfer length not modulo %d\n", un->un_secsize);
1743 1743 return (EINVAL);
1744 1744 }
1745 1745 return (physio(dcdstrategy, (struct buf *)0, dev, B_WRITE, dcdmin,
1746 1746 uio));
1747 1747 }
1748 1748
1749 1749 /* ARGSUSED2 */
1750 1750 static int
1751 1751 dcdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
1752 1752 {
1753 1753 int secmask;
1754 1754 struct uio *uio = aio->aio_uio;
1755 1755 GET_SOFT_STATE(dev);
1756 1756 #ifdef lint
1757 1757 part = part;
1758 1758 #endif /* lint */
1759 1759 secmask = un->un_secsize - 1;
1760 1760
1761 1761 if (uio->uio_loffset & ((offset_t)(secmask))) {
1762 1762 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1763 1763 "file offset not modulo %d\n",
1764 1764 un->un_secsize);
1765 1765 return (EINVAL);
1766 1766 } else if (uio->uio_iov->iov_len & (secmask)) {
1767 1767 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1768 1768 "transfer length not modulo %d\n", un->un_secsize);
1769 1769 return (EINVAL);
1770 1770 }
1771 1771 return (aphysio(dcdstrategy, anocancel, dev, B_WRITE, dcdmin, aio));
1772 1772 }
1773 1773
1774 1774 /*
1775 1775 * strategy routine
1776 1776 */
1777 1777 static int
1778 1778 dcdstrategy(struct buf *bp)
1779 1779 {
1780 1780 struct dcd_disk *un;
1781 1781 struct diskhd *dp;
1782 1782 int i;
1783 1783 minor_t minor = getminor(bp->b_edev);
1784 1784 diskaddr_t p_lblksrt;
1785 1785 diskaddr_t lblocks;
1786 1786 diskaddr_t bn;
1787 1787
1788 1788 if ((un = ddi_get_soft_state(dcd_state,
1789 1789 minor >> DCDUNIT_SHIFT)) == NULL ||
1790 1790 un->un_state == DCD_STATE_DUMPING ||
1791 1791 ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL)) {
1792 1792 SET_BP_ERROR(bp, ((un) ? ENXIO : EIO));
1793 1793 error:
1794 1794 bp->b_resid = bp->b_bcount;
1795 1795 biodone(bp);
1796 1796 return (0);
1797 1797 }
1798 1798
1799 1799 /*
1800 1800 * If the request size (buf->b_bcount)is greater than the size
1801 1801 * (un->un_max_xfer_size) supported by the target driver fail
1802 1802 * the request with EINVAL error code.
1803 1803 *
1804 1804 * We are not supposed to receive requests exceeding
1805 1805 * un->un_max_xfer_size size because the caller is expected to
1806 1806 * check what is the maximum size that is supported by this
1807 1807 * driver either through ioctl or dcdmin routine(which is private
1808 1808 * to this driver).
1809 1809 * But we have seen cases (like meta driver(md))where dcdstrategy
1810 1810 * called with more than supported size and cause data corruption.
1811 1811 */
1812 1812
1813 1813 if (bp->b_bcount > un->un_max_xfer_size) {
1814 1814 SET_BP_ERROR(bp, EINVAL);
1815 1815 goto error;
1816 1816 }
1817 1817
1818 1818 TRACE_2(TR_FAC_DADA, TR_DCDSTRATEGY_START,
1819 1819 "dcdstrategy_start: bp 0x%p un 0x%p", bp, un);
1820 1820
1821 1821 /*
1822 1822 * Commands may sneak in while we released the mutex in
1823 1823 * DDI_SUSPEND, we should block new commands.
1824 1824 */
1825 1825 mutex_enter(DCD_MUTEX);
1826 1826 while (un->un_state == DCD_STATE_SUSPENDED) {
1827 1827 cv_wait(&un->un_suspend_cv, DCD_MUTEX);
1828 1828 }
1829 1829
1830 1830 if (un->un_state == DCD_STATE_PM_SUSPENDED) {
1831 1831 mutex_exit(DCD_MUTEX);
1832 1832 (void) pm_idle_component(DCD_DEVINFO, 0);
1833 1833 if (pm_raise_power(DCD_DEVINFO, 0,
1834 1834 DCD_DEVICE_ACTIVE) != DDI_SUCCESS) {
1835 1835 SET_BP_ERROR(bp, EIO);
1836 1836 goto error;
1837 1837 }
1838 1838 mutex_enter(DCD_MUTEX);
1839 1839 }
1840 1840 mutex_exit(DCD_MUTEX);
1841 1841
1842 1842 /*
1843 1843 * Map-in the buffer in case starting address is not word aligned.
1844 1844 */
1845 1845
1846 1846 if (((uintptr_t)bp->b_un.b_addr) & 0x1)
1847 1847 bp_mapin(bp);
1848 1848
1849 1849 bp->b_flags &= ~(B_DONE|B_ERROR);
1850 1850 bp->b_resid = 0;
1851 1851 bp->av_forw = 0;
1852 1852
1853 1853 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1854 1854 "bp->b_bcount %lx\n", bp->b_bcount);
1855 1855
1856 1856 if (bp != un->un_sbufp) {
1857 1857 validated: if (cmlb_partinfo(un->un_dklbhandle,
1858 1858 minor & DCDPART_MASK,
1859 1859 &lblocks,
1860 1860 &p_lblksrt,
1861 1861 NULL,
1862 1862 NULL,
1863 1863 0) == 0) {
1864 1864
1865 1865 bn = dkblock(bp);
1866 1866
1867 1867 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1868 1868 "dkblock(bp) is %llu\n", bn);
1869 1869
1870 1870 i = 0;
1871 1871 if (bn < 0) {
1872 1872 i = -1;
1873 1873 } else if (bn >= lblocks) {
1874 1874 /*
1875 1875 * For proper comparison, file system block
1876 1876 * number has to be scaled to actual CD
1877 1877 * transfer size.
1878 1878 * Since all the CDROM operations
1879 1879 * that have Sun Labels are in the correct
1880 1880 * block size this will work for CD's. This
1881 1881 * will have to change when we have different
1882 1882 * sector sizes.
1883 1883 *
1884 1884 * if bn == lblocks,
1885 1885 * Not an error, resid == count
1886 1886 */
1887 1887 if (bn > lblocks) {
1888 1888 i = -1;
1889 1889 } else {
1890 1890 i = 1;
1891 1891 }
1892 1892 } else if (bp->b_bcount & (un->un_secsize-1)) {
1893 1893 /*
1894 1894 * This should really be:
1895 1895 *
1896 1896 * ... if (bp->b_bcount & (un->un_lbasize-1))
1897 1897 *
1898 1898 */
1899 1899 i = -1;
1900 1900 } else {
1901 1901 if (!bp->b_bcount) {
1902 1902 printf("Waring : Zero read or Write\n");
1903 1903 goto error;
1904 1904 }
1905 1905 /*
1906 1906 * sort by absolute block number.
1907 1907 */
1908 1908 bp->b_resid = bn;
1909 1909 bp->b_resid += p_lblksrt;
1910 1910 /*
1911 1911 * zero out av_back - this will be a signal
1912 1912 * to dcdstart to go and fetch the resources
1913 1913 */
1914 1914 bp->av_back = NO_PKT_ALLOCATED;
1915 1915 }
1916 1916
1917 1917 /*
1918 1918 * Check to see whether or not we are done
1919 1919 * (with or without errors).
1920 1920 */
1921 1921
1922 1922 if (i != 0) {
1923 1923 if (i < 0) {
1924 1924 bp->b_flags |= B_ERROR;
1925 1925 }
1926 1926 goto error;
1927 1927 }
1928 1928 } else {
1929 1929 /*
1930 1930 * opened in NDELAY/NONBLOCK mode?
1931 1931 * Check if disk is ready and has a valid geometry
1932 1932 */
1933 1933 if (dcd_ready_and_valid(bp->b_edev, un) == 0) {
1934 1934 goto validated;
1935 1935 } else {
1936 1936 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
1937 1937 "i/o to invalid geometry\n");
1938 1938 SET_BP_ERROR(bp, EIO);
1939 1939 goto error;
1940 1940 }
1941 1941 }
1942 1942 } else if (BP_HAS_NO_PKT(bp)) {
1943 1943 struct udcd_cmd *tscmdp;
1944 1944 struct dcd_cmd *tcmdp;
1945 1945 /*
1946 1946 * This indicates that it is a special buffer
1947 1947 * This could be a udcd-cmd and hence call bp_mapin just
1948 1948 * in case that it could be a PIO command issued.
1949 1949 */
1950 1950 tscmdp = (struct udcd_cmd *)bp->b_forw;
1951 1951 tcmdp = tscmdp->udcd_cmd;
1952 1952 if ((tcmdp->cmd != ATA_READ_DMA) && (tcmdp->cmd != 0xc9) &&
1953 1953 (tcmdp->cmd != ATA_WRITE_DMA) && (tcmdp->cmd != 0xcb) &&
1954 1954 (tcmdp->cmd != IDENTIFY_DMA) &&
1955 1955 (tcmdp->cmd != ATA_FLUSH_CACHE)) {
1956 1956 bp_mapin(bp);
1957 1957 }
1958 1958 }
1959 1959
1960 1960 /*
1961 1961 * We are doing it a bit non-standard. That is, the
1962 1962 * head of the b_actf chain is *not* the active command-
1963 1963 * it is just the head of the wait queue. The reason
1964 1964 * we do this is that the head of the b_actf chain is
1965 1965 * guaranteed to not be moved by disksort(), so that
1966 1966 * our restart command (pointed to by
1967 1967 * b_forw) and the head of the wait queue (b_actf) can
1968 1968 * have resources granted without it getting lost in
1969 1969 * the queue at some later point (where we would have
1970 1970 * to go and look for it).
1971 1971 */
1972 1972 mutex_enter(DCD_MUTEX);
1973 1973
1974 1974 DCD_DO_KSTATS(un, kstat_waitq_enter, bp);
1975 1975
1976 1976 dp = &un->un_utab;
1977 1977
1978 1978 if (dp->b_actf == NULL) {
1979 1979 dp->b_actf = bp;
1980 1980 dp->b_actl = bp;
1981 1981 } else if ((un->un_state == DCD_STATE_SUSPENDED) &&
1982 1982 bp == un->un_sbufp) {
1983 1983 bp->b_actf = dp->b_actf;
1984 1984 dp->b_actf = bp;
1985 1985 } else {
1986 1986 TRACE_3(TR_FAC_DADA, TR_DCDSTRATEGY_DISKSORT_START,
1987 1987 "dcdstrategy_disksort_start: dp 0x%p bp 0x%p un 0x%p",
1988 1988 dp, bp, un);
1989 1989 disksort(dp, bp);
1990 1990 TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_DISKSORT_END,
1991 1991 "dcdstrategy_disksort_end");
1992 1992 }
1993 1993
1994 1994 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1995 1995 "ncmd %x , throttle %x, forw 0x%p\n",
1996 1996 un->un_ncmds, un->un_throttle, (void *)dp->b_forw);
1997 1997 ASSERT(un->un_ncmds >= 0);
1998 1998 ASSERT(un->un_throttle >= 0);
1999 1999 if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) {
2000 2000 dcdstart(un);
2001 2001 } else if (BP_HAS_NO_PKT(dp->b_actf)) {
2002 2002 struct buf *cmd_bp;
2003 2003
2004 2004 cmd_bp = dp->b_actf;
2005 2005 cmd_bp->av_back = ALLOCATING_PKT;
2006 2006 mutex_exit(DCD_MUTEX);
2007 2007 /*
2008 2008 * try and map this one
2009 2009 */
2010 2010 TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_SMALL_WINDOW_START,
2011 2011 "dcdstrategy_small_window_call (begin)");
2012 2012
2013 2013 make_dcd_cmd(un, cmd_bp, NULL_FUNC);
2014 2014
2015 2015 TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_SMALL_WINDOW_END,
2016 2016 "dcdstrategy_small_window_call (end)");
2017 2017
2018 2018 /*
2019 2019 * there is a small window where the active cmd
2020 2020 * completes before make_dcd_cmd returns.
2021 2021 * consequently, this cmd never gets started so
2022 2022 * we start it from here
2023 2023 */
2024 2024 mutex_enter(DCD_MUTEX);
2025 2025 if ((un->un_ncmds < un->un_throttle) &&
2026 2026 (dp->b_forw == NULL)) {
2027 2027 dcdstart(un);
2028 2028 }
2029 2029 }
2030 2030 mutex_exit(DCD_MUTEX);
2031 2031
2032 2032 done:
2033 2033 TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_END, "dcdstrategy_end");
2034 2034 return (0);
2035 2035 }
2036 2036
2037 2037
2038 2038 /*
2039 2039 * Unit start and Completion
2040 2040 * NOTE: we assume that the caller has at least checked for:
2041 2041 * (un->un_ncmds < un->un_throttle)
2042 2042 * if not, there is no real harm done, dcd_transport() will
2043 2043 * return BUSY
2044 2044 */
static void
dcdstart(struct dcd_disk *un)
{
	int status, sort_key;
	struct buf *bp;
	struct diskhd *dp;
	/* Remember the pre-call last_state so a failed start can restore it */
	uchar_t state = un->un_last_state;

	TRACE_1(TR_FAC_DADA, TR_DCDSTART_START, "dcdstart_start: un 0x%p", un);

retry:
	ASSERT(mutex_owned(DCD_MUTEX));

	/*
	 * Nothing to do if the wait queue is empty, if the head of the
	 * queue is still having its packet allocated (ALLOCATING_PKT
	 * sentinel in av_back), or if a restart command (b_forw) owns
	 * the device.
	 */
	dp = &un->un_utab;
	if (((bp = dp->b_actf) == NULL) || (bp->av_back == ALLOCATING_PKT) ||
	    (dp->b_forw != NULL)) {
		TRACE_0(TR_FAC_DADA, TR_DCDSTART_NO_WORK_END,
		    "dcdstart_end (no work)");
		return;
	}

	/*
	 * remove from active queue
	 */
	dp->b_actf = bp->b_actf;
	bp->b_actf = 0;

	/*
	 * increment ncmds before calling dcd_transport because dcdintr
	 * may be called before we return from dcd_transport!
	 */
	un->un_ncmds++;

	/*
	 * If measuring stats, mark exit from wait queue and
	 * entrance into run 'queue' if and only if we are
	 * going to actually start a command.
	 * Normally the bp already has a packet at this point
	 */
	DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp);

	mutex_exit(DCD_MUTEX);

	if (BP_HAS_NO_PKT(bp)) {
		/* Allocate a packet; dcdrunout is the resource callback */
		make_dcd_cmd(un, bp, dcdrunout);
		if (BP_HAS_NO_PKT(bp) && !(bp->b_flags & B_ERROR)) {
			/*
			 * No resources yet: put the buf back at the head
			 * of the wait queue and wait for the runout
			 * callback to restart us.
			 */
			mutex_enter(DCD_MUTEX);
			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);

			bp->b_actf = dp->b_actf;
			dp->b_actf = bp;
			New_state(un, DCD_STATE_RWAIT);
			un->un_ncmds--;
			TRACE_0(TR_FAC_DADA, TR_DCDSTART_NO_RESOURCES_END,
			    "dcdstart_end (No Resources)");
			goto done;

		} else if (bp->b_flags & B_ERROR) {
			/* Packet allocation failed hard: fail the buf */
			mutex_enter(DCD_MUTEX);
			DCD_DO_KSTATS(un, kstat_runq_exit, bp);

			un->un_ncmds--;
			bp->b_resid = bp->b_bcount;
			if (bp->b_error == 0) {
				SET_BP_ERROR(bp, EIO);
			}

			/*
			 * restore old state
			 */
			un->un_state = un->un_last_state;
			un->un_last_state = state;

			mutex_exit(DCD_MUTEX);

			biodone(bp);
			mutex_enter(DCD_MUTEX);
			if (un->un_state == DCD_STATE_SUSPENDED) {
				cv_broadcast(&un->un_disk_busy_cv);
			}

			if ((un->un_ncmds < un->un_throttle) &&
			    (dp->b_forw == NULL)) {
				goto retry;
			} else {
				goto done;
			}
		}
	}

	/*
	 * Restore resid from the packet, b_resid had been the
	 * disksort key.
	 */
	sort_key = bp->b_resid;
	bp->b_resid = BP_PKT(bp)->pkt_resid;
	BP_PKT(bp)->pkt_resid = 0;

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "bp->b_resid %lx, pkt_resid %lx\n",
	    bp->b_resid, BP_PKT(bp)->pkt_resid);

	/*
	 * We used to check whether or not to try and link commands here.
	 * Since we have found that there is no performance improvement
	 * for linked commands, this has not made much sense.
	 */
	if ((status = dcd_transport((struct dcd_pkt *)BP_PKT(bp)))
	    != TRAN_ACCEPT) {
		mutex_enter(DCD_MUTEX);
		un->un_ncmds--;
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "transport returned %x\n", status);
		if (status == TRAN_BUSY) {
			DCD_DO_ERRSTATS(un, dcd_transerrs);
			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
			dcd_handle_tran_busy(bp, dp, un);
			/*
			 * Re-establish the disksort key only while other
			 * commands are outstanding (the queue is live).
			 */
			if (un->un_ncmds > 0) {
				bp->b_resid = sort_key;
			}
		} else {
			/* Transport rejected outright: fail the buf */
			DCD_DO_KSTATS(un, kstat_runq_exit, bp);
			mutex_exit(DCD_MUTEX);

			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
			    "transport rejected (%d)\n",
			    status);
			SET_BP_ERROR(bp, EIO);
			bp->b_resid = bp->b_bcount;
			if (bp != un->un_sbufp) {
				dcd_destroy_pkt(BP_PKT(bp));
			}
			biodone(bp);

			mutex_enter(DCD_MUTEX);
			if (un->un_state == DCD_STATE_SUSPENDED) {
				cv_broadcast(&un->un_disk_busy_cv);
			}
			if ((un->un_ncmds < un->un_throttle) &&
			    (dp->b_forw == NULL)) {
				goto retry;
			}
		}
	} else {
		mutex_enter(DCD_MUTEX);

		/*
		 * Command accepted; opportunistically pre-allocate a
		 * packet for the next waiter while we are off the lock.
		 */
		if (dp->b_actf && BP_HAS_NO_PKT(dp->b_actf)) {
			struct buf *cmd_bp;

			cmd_bp = dp->b_actf;
			cmd_bp->av_back = ALLOCATING_PKT;
			mutex_exit(DCD_MUTEX);
			/*
			 * try and map this one
			 */
			TRACE_0(TR_FAC_DADA, TR_DCASTART_SMALL_WINDOW_START,
			    "dcdstart_small_window_start");

			make_dcd_cmd(un, cmd_bp, NULL_FUNC);

			TRACE_0(TR_FAC_DADA, TR_DCDSTART_SMALL_WINDOW_END,
			    "dcdstart_small_window_end");
			/*
			 * there is a small window where the active cmd
			 * completes before make_dcd_cmd returns.
			 * consequently, this cmd never gets started so
			 * we start it from here
			 */
			mutex_enter(DCD_MUTEX);
			if ((un->un_ncmds < un->un_throttle) &&
			    (dp->b_forw == NULL)) {
				goto retry;
			}
		}
	}

done:
	ASSERT(mutex_owned(DCD_MUTEX));
	TRACE_0(TR_FAC_DADA, TR_DCDSTART_END, "dcdstart_end");
}
2225 2225
2226 2226 /*
2227 2227 * make_dcd_cmd: create a pkt
2228 2228 */
2229 2229 static void
2230 2230 make_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*func)())
2231 2231 {
2232 2232 auto int count, com, direction;
2233 2233 struct dcd_pkt *pkt;
2234 2234 int flags, tval;
2235 2235
2236 2236 _NOTE(DATA_READABLE_WITHOUT_LOCK(dcd_disk::un_dp))
2237 2237 TRACE_3(TR_FAC_DADA, TR_MAKE_DCD_CMD_START,
2238 2238 "make_dcd_cmd_start: un 0x%p bp 0x%p un 0x%p", un, bp, un);
2239 2239
2240 2240
2241 2241 flags = un->un_cmd_flags;
2242 2242
2243 2243 if (bp != un->un_sbufp) {
2244 2244 int partition = DCDPART(bp->b_edev);
2245 2245 diskaddr_t p_lblksrt;
2246 2246 diskaddr_t lblocks;
2247 2247 long secnt;
2248 2248 uint32_t blkno;
2249 2249 int dkl_nblk, delta;
2250 2250 long resid;
2251 2251
2252 2252 if (cmlb_partinfo(un->un_dklbhandle,
2253 2253 partition,
2254 2254 &lblocks,
2255 2255 &p_lblksrt,
2256 2256 NULL,
2257 2257 NULL,
2258 2258 0) != NULL) {
2259 2259 lblocks = 0;
2260 2260 p_lblksrt = 0;
2261 2261 }
2262 2262
2263 2263 dkl_nblk = (int)lblocks;
2264 2264
2265 2265 /*
2266 2266 * Make sure we don't run off the end of a partition.
2267 2267 *
2268 2268 * Put this test here so that we can adjust b_count
2269 2269 * to accurately reflect the actual amount we are
2270 2270 * goint to transfer.
2271 2271 */
2272 2272
2273 2273 /*
2274 2274 * First, compute partition-relative block number
2275 2275 */
2276 2276 blkno = dkblock(bp);
2277 2277 secnt = (bp->b_bcount + (un->un_secsize - 1)) >> un->un_secdiv;
2278 2278 count = MIN(secnt, dkl_nblk - blkno);
2279 2279 if (count != secnt) {
2280 2280 /*
2281 2281 * We have an overrun
2282 2282 */
2283 2283 resid = (secnt - count) << un->un_secdiv;
2284 2284 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2285 2285 "overrun by %ld sectors\n",
2286 2286 secnt - count);
2287 2287 bp->b_bcount -= resid;
2288 2288 } else {
2289 2289 resid = 0;
2290 2290 }
2291 2291
2292 2292 /*
2293 2293 * Adjust block number to absolute
2294 2294 */
2295 2295 delta = (int)p_lblksrt;
2296 2296 blkno += delta;
2297 2297
2298 2298 mutex_enter(DCD_MUTEX);
2299 2299 /*
2300 2300 * This is for devices having block size different from
2301 2301 * from DEV_BSIZE (e.g. 2K CDROMs).
2302 2302 */
2303 2303 if (un->un_lbasize != un->un_secsize) {
2304 2304 blkno >>= un->un_blknoshift;
2305 2305 count >>= un->un_blknoshift;
2306 2306 }
2307 2307 mutex_exit(DCD_MUTEX);
2308 2308
2309 2309 TRACE_0(TR_FAC_DADA, TR_MAKE_DCD_CMD_INIT_PKT_START,
2310 2310 "make_dcd_cmd_init_pkt_call (begin)");
2311 2311 pkt = dcd_init_pkt(ROUTE, NULL, bp,
2312 2312 (uint32_t)sizeof (struct dcd_cmd),
2313 2313 un->un_cmd_stat_size, PP_LEN, PKT_CONSISTENT,
2314 2314 func, (caddr_t)un);
2315 2315 TRACE_1(TR_FAC_DADA, TR_MAKE_DCD_CMD_INIT_PKT_END,
2316 2316 "make_dcd_cmd_init_pkt_call (end): pkt 0x%p", pkt);
2317 2317 if (!pkt) {
2318 2318 bp->b_bcount += resid;
2319 2319 bp->av_back = NO_PKT_ALLOCATED;
2320 2320 TRACE_0(TR_FAC_DADA,
2321 2321 TR_MAKE_DCD_CMD_NO_PKT_ALLOCATED1_END,
2322 2322 "make_dcd_cmd_end (NO_PKT_ALLOCATED1)");
2323 2323 return;
2324 2324 }
2325 2325 if (bp->b_flags & B_READ) {
2326 2326 if ((un->un_dp->options & DMA_SUPPORTTED) ==
2327 2327 DMA_SUPPORTTED) {
2328 2328 com = ATA_READ_DMA;
2329 2329 } else {
2330 2330 if (un->un_dp->options & BLOCK_MODE)
2331 2331 com = ATA_READ_MULTIPLE;
2332 2332 else
2333 2333 com = ATA_READ;
2334 2334 }
2335 2335 direction = DATA_READ;
2336 2336 } else {
2337 2337 if ((un->un_dp->options & DMA_SUPPORTTED) ==
2338 2338 DMA_SUPPORTTED) {
2339 2339 com = ATA_WRITE_DMA;
2340 2340 } else {
2341 2341 if (un->un_dp->options & BLOCK_MODE)
2342 2342 com = ATA_WRITE_MULTIPLE;
2343 2343 else
2344 2344 com = ATA_WRITE;
2345 2345 }
2346 2346 direction = DATA_WRITE;
2347 2347 }
2348 2348
2349 2349 /*
2350 2350 * Save the resid in the packet, temporarily until
2351 2351 * we transport the command.
2352 2352 */
2353 2353 pkt->pkt_resid = resid;
2354 2354
2355 2355 makecommand(pkt, flags, com, blkno, ADD_LBA_MODE,
2356 2356 bp->b_bcount, direction, 0);
2357 2357 tval = dcd_io_time;
2358 2358 } else {
2359 2359
2360 2360 struct udcd_cmd *scmd = (struct udcd_cmd *)bp->b_forw;
2361 2361
2362 2362 /*
2363 2363 * set options
2364 2364 */
2365 2365 if ((scmd->udcd_flags & UDCD_SILENT) && !(DEBUGGING)) {
2366 2366 flags |= FLAG_SILENT;
2367 2367 }
2368 2368 if (scmd->udcd_flags & UDCD_DIAGNOSE)
2369 2369 flags |= FLAG_DIAGNOSE;
2370 2370
2371 2371 if (scmd->udcd_flags & UDCD_NOINTR)
2372 2372 flags |= FLAG_NOINTR;
2373 2373
2374 2374 pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
2375 2375 (bp->b_bcount)? bp: NULL,
2376 2376 (uint32_t)sizeof (struct dcd_cmd),
2377 2377 2, PP_LEN, PKT_CONSISTENT, func, (caddr_t)un);
2378 2378
2379 2379 if (!pkt) {
2380 2380 bp->av_back = NO_PKT_ALLOCATED;
2381 2381 return;
2382 2382 }
2383 2383
2384 2384 makecommand(pkt, 0, scmd->udcd_cmd->cmd,
2385 2385 scmd->udcd_cmd->sector_num.lba_num,
2386 2386 scmd->udcd_cmd->address_mode,
2387 2387 scmd->udcd_cmd->size,
2388 2388 scmd->udcd_cmd->direction, scmd->udcd_cmd->features);
2389 2389
2390 2390 pkt->pkt_flags = flags;
2391 2391 if (scmd->udcd_timeout == 0)
2392 2392 tval = dcd_io_time;
2393 2393 else
2394 2394 tval = scmd->udcd_timeout;
2395 2395 /* UDAD interface should be decided. */
2396 2396 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2397 2397 "udcd interface\n");
2398 2398 }
2399 2399
2400 2400 pkt->pkt_comp = dcdintr;
2401 2401 pkt->pkt_time = tval;
2402 2402 PKT_SET_BP(pkt, bp);
2403 2403 bp->av_back = (struct buf *)pkt;
2404 2404
2405 2405 TRACE_0(TR_FAC_DADA, TR_MAKE_DCD_CMD_END, "make_dcd_cmd_end");
2406 2406 }
2407 2407
2408 2408 /*
2409 2409 * Command completion processing
2410 2410 */
2411 2411 static void
2412 2412 dcdintr(struct dcd_pkt *pkt)
2413 2413 {
2414 2414 struct dcd_disk *un;
2415 2415 struct buf *bp;
2416 2416 int action;
2417 2417 int status;
2418 2418
2419 2419 bp = PKT_GET_BP(pkt);
2420 2420 un = ddi_get_soft_state(dcd_state, DCDUNIT(bp->b_edev));
2421 2421
2422 2422 TRACE_1(TR_FAC_DADA, TR_DCDINTR_START, "dcdintr_start: un 0x%p", un);
2423 2423 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdintr\n");
2424 2424
2425 2425 mutex_enter(DCD_MUTEX);
2426 2426 un->un_ncmds--;
2427 2427 DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2428 2428 ASSERT(un->un_ncmds >= 0);
2429 2429
2430 2430 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2431 2431 "reason %x and Status %x\n", pkt->pkt_reason, SCBP_C(pkt));
2432 2432
2433 2433 /*
2434 2434 * do most common case first
2435 2435 */
2436 2436 if ((pkt->pkt_reason == CMD_CMPLT) && (SCBP_C(pkt) == 0)) {
2437 2437 int com = GETATACMD((struct dcd_cmd *)pkt->pkt_cdbp);
2438 2438
2439 2439 if (un->un_state == DCD_STATE_OFFLINE) {
2440 2440 un->un_state = un->un_last_state;
2441 2441 dcd_log(DCD_DEVINFO, dcd_label, CE_NOTE,
2442 2442 (const char *) diskokay);
2443 2443 }
2444 2444 /*
2445 2445 * If the command is a read or a write, and we have
2446 2446 * a non-zero pkt_resid, that is an error. We should
2447 2447 * attempt to retry the operation if possible.
2448 2448 */
2449 2449 action = COMMAND_DONE;
2450 2450 if (pkt->pkt_resid && (com == ATA_READ || com == ATA_WRITE)) {
2451 2451 DCD_DO_ERRSTATS(un, dcd_harderrs);
2452 2452 if ((int)PKT_GET_RETRY_CNT(pkt) < dcd_retry_count) {
2453 2453 PKT_INCR_RETRY_CNT(pkt, 1);
2454 2454 action = QUE_COMMAND;
2455 2455 } else {
2456 2456 /*
2457 2457 * if we have exhausted retries
2458 2458 * a command with a residual is in error in
2459 2459 * this case.
2460 2460 */
2461 2461 action = COMMAND_DONE_ERROR;
2462 2462 }
2463 2463 dcd_log(DCD_DEVINFO, dcd_label,
2464 2464 CE_WARN, "incomplete %s- %s\n",
2465 2465 (bp->b_flags & B_READ)? "read" : "write",
2466 2466 (action == QUE_COMMAND)? "retrying" :
2467 2467 "giving up");
2468 2468 }
2469 2469
2470 2470 /*
2471 2471 * pkt_resid will reflect, at this point, a residual
2472 2472 * of how many bytes left to be transferred there were
2473 2473 * from the actual scsi command. Add this to b_resid i.e
2474 2474 * the amount this driver could not see to transfer,
2475 2475 * to get the total number of bytes not transfered.
2476 2476 */
2477 2477 if (action != QUE_COMMAND) {
2478 2478 bp->b_resid += pkt->pkt_resid;
2479 2479 }
2480 2480
2481 2481 } else if (pkt->pkt_reason != CMD_CMPLT) {
2482 2482 action = dcd_handle_incomplete(un, bp);
2483 2483 }
2484 2484
2485 2485 /*
2486 2486 * If we are in the middle of syncing or dumping, we have got
2487 2487 * here because dcd_transport has called us explictly after
2488 2488 * completing the command in a polled mode. We don't want to
2489 2489 * have a recursive call into dcd_transport again.
2490 2490 */
2491 2491 if (ddi_in_panic() && (action == QUE_COMMAND)) {
2492 2492 action = COMMAND_DONE_ERROR;
2493 2493 }
2494 2494
2495 2495 /*
2496 2496 * save pkt reason; consecutive failures are not reported unless
2497 2497 * fatal
2498 2498 * do not reset last_pkt_reason when the cmd was retried and
2499 2499 * succeeded because
2500 2500 * there maybe more commands comming back with last_pkt_reason
2501 2501 */
2502 2502 if ((un->un_last_pkt_reason != pkt->pkt_reason) &&
2503 2503 ((pkt->pkt_reason != CMD_CMPLT) ||
2504 2504 (PKT_GET_RETRY_CNT(pkt) == 0))) {
2505 2505 un->un_last_pkt_reason = pkt->pkt_reason;
2506 2506 }
2507 2507
2508 2508 switch (action) {
2509 2509 case COMMAND_DONE_ERROR:
2510 2510 error:
2511 2511 if (bp->b_resid == 0) {
2512 2512 bp->b_resid = bp->b_bcount;
2513 2513 }
2514 2514 if (bp->b_error == 0) {
2515 2515 struct dcd_cmd *cdbp = (struct dcd_cmd *)pkt->pkt_cdbp;
2516 2516 if (cdbp->cmd == ATA_FLUSH_CACHE &&
2517 2517 (pkt->pkt_scbp[0] & STATUS_ATA_ERR) &&
2518 2518 (pkt->pkt_scbp[1] & ERR_ABORT)) {
2519 2519 SET_BP_ERROR(bp, ENOTSUP);
2520 2520 un->un_flush_not_supported = 1;
2521 2521 } else {
2522 2522 SET_BP_ERROR(bp, EIO);
2523 2523 }
2524 2524 }
2525 2525 bp->b_flags |= B_ERROR;
2526 2526 /*FALLTHROUGH*/
2527 2527 case COMMAND_DONE:
2528 2528 dcddone_and_mutex_exit(un, bp);
2529 2529
2530 2530 TRACE_0(TR_FAC_DADA, TR_DCDINTR_COMMAND_DONE_END,
2531 2531 "dcdintr_end (COMMAND_DONE)");
2532 2532 return;
2533 2533
2534 2534 case QUE_COMMAND:
2535 2535 if (un->un_ncmds >= un->un_throttle) {
2536 2536 struct diskhd *dp = &un->un_utab;
2537 2537
2538 2538 bp->b_actf = dp->b_actf;
2539 2539 dp->b_actf = bp;
2540 2540
2541 2541 DCD_DO_KSTATS(un, kstat_waitq_enter, bp);
2542 2542
2543 2543 mutex_exit(DCD_MUTEX);
2544 2544 goto exit;
2545 2545 }
2546 2546
2547 2547 un->un_ncmds++;
2548 2548 /* reset the pkt reason again */
2549 2549 pkt->pkt_reason = 0;
2550 2550 DCD_DO_KSTATS(un, kstat_runq_enter, bp);
2551 2551 mutex_exit(DCD_MUTEX);
2552 2552 if ((status = dcd_transport(BP_PKT(bp))) != TRAN_ACCEPT) {
2553 2553 struct diskhd *dp = &un->un_utab;
2554 2554
2555 2555 mutex_enter(DCD_MUTEX);
2556 2556 un->un_ncmds--;
2557 2557 if (status == TRAN_BUSY) {
2558 2558 DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
2559 2559 dcd_handle_tran_busy(bp, dp, un);
2560 2560 mutex_exit(DCD_MUTEX);
2561 2561 goto exit;
2562 2562 }
2563 2563 DCD_DO_ERRSTATS(un, dcd_transerrs);
2564 2564 DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2565 2565
2566 2566 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2567 2567 "requeue of command fails (%x)\n", status);
2568 2568 SET_BP_ERROR(bp, EIO);
2569 2569 bp->b_resid = bp->b_bcount;
2570 2570
2571 2571 dcddone_and_mutex_exit(un, bp);
2572 2572 goto exit;
2573 2573 }
2574 2574 break;
2575 2575
2576 2576 case JUST_RETURN:
2577 2577 default:
2578 2578 DCD_DO_KSTATS(un, kstat_waitq_enter, bp);
2579 2579 mutex_exit(DCD_MUTEX);
2580 2580 break;
2581 2581 }
2582 2582
2583 2583 exit:
2584 2584 TRACE_0(TR_FAC_DADA, TR_DCDINTR_END, "dcdintr_end");
2585 2585 }
2586 2586
2587 2587
2588 2588 /*
2589 2589 * Done with a command.
2590 2590 */
static void
dcddone_and_mutex_exit(struct dcd_disk *un, register struct buf *bp)
{
	struct diskhd *dp;

	TRACE_1(TR_FAC_DADA, TR_DCDONE_START, "dcddone_start: un 0x%p", un);

	/* Called with DCD_MUTEX held; released before biodone() below */
	_NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(&un->un_dcd->dcd_mutex));

	dp = &un->un_utab;
	if (bp == dp->b_forw) {
		/* This was the restart command; clear the busy marker */
		dp->b_forw = NULL;
	}

	/* Per-device kstat byte/op accounting */
	if (un->un_stats) {
		ulong_t n_done = bp->b_bcount - bp->b_resid;
		if (bp->b_flags & B_READ) {
			IOSP->reads++;
			IOSP->nread += n_done;
		} else {
			IOSP->writes++;
			IOSP->nwritten += n_done;
		}
	}
	/* Per-partition kstat byte/op accounting */
	if (IO_PARTITION_STATS) {
		ulong_t n_done = bp->b_bcount - bp->b_resid;
		if (bp->b_flags & B_READ) {
			IOSP_PARTITION->reads++;
			IOSP_PARTITION->nread += n_done;
		} else {
			IOSP_PARTITION->writes++;
			IOSP_PARTITION->nwritten += n_done;
		}
	}

	/*
	 * Start the next one before releasing resources on this one
	 *
	 * NOTE(review): the un_state != DCD_STATE_SUSPENDED test in the
	 * else-if is redundant (the first branch already handled the
	 * suspended case) but harmless.
	 */
	if (un->un_state == DCD_STATE_SUSPENDED) {
		cv_broadcast(&un->un_disk_busy_cv);
	} else if (dp->b_actf && (un->un_ncmds < un->un_throttle) &&
	    (dp->b_forw == NULL && un->un_state != DCD_STATE_SUSPENDED)) {
		dcdstart(un);
	}

	mutex_exit(DCD_MUTEX);

	/* The special buf (un_sbufp) owner destroys its own packet */
	if (bp != un->un_sbufp) {
		dcd_destroy_pkt(BP_PKT(bp));
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "regular done: resid %ld\n", bp->b_resid);
	} else {
		ASSERT(un->un_sbuf_busy);
	}
	TRACE_0(TR_FAC_DADA, TR_DCDDONE_BIODONE_CALL, "dcddone_biodone_call");

	biodone(bp);

	(void) pm_idle_component(DCD_DEVINFO, 0);

	TRACE_0(TR_FAC_DADA, TR_DCDDONE_END, "dcddone end");
}
2653 2653
2654 2654
2655 2655 /*
2656 2656 * reset the disk unless the transport layer has already
2657 2657 * cleared the problem
2658 2658 */
2659 2659 #define C1 (STAT_ATA_BUS_RESET|STAT_ATA_DEV_RESET|STAT_ATA_ABORTED)
2660 2660 static void
2661 2661 dcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt)
2662 2662 {
2663 2663
2664 2664 if ((pkt->pkt_statistics & C1) == 0) {
2665 2665 mutex_exit(DCD_MUTEX);
2666 2666 if (!dcd_reset(ROUTE, RESET_ALL)) {
2667 2667 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2668 2668 "Reset failed");
2669 2669 }
2670 2670 mutex_enter(DCD_MUTEX);
2671 2671 }
2672 2672 }
2673 2673
static int
dcd_handle_incomplete(struct dcd_disk *un, struct buf *bp)
{
	static char *fail = "ATA transport failed: reason '%s': %s\n";
	static char *notresp = "disk not responding to selection\n";
	/* rval is the action returned to dcdintr(); default is give up */
	int rval = COMMAND_DONE_ERROR;
	int action = COMMAND_SOFT_ERROR;
	struct dcd_pkt *pkt = BP_PKT(bp);
	/* Suppress chatter when suspended, or for silent special cmds */
	int be_chatty = (un->un_state != DCD_STATE_SUSPENDED) &&
	    (bp != un->un_sbufp || !(pkt->pkt_flags & FLAG_SILENT));

	ASSERT(mutex_owned(DCD_MUTEX));

	switch (pkt->pkt_reason) {

	case CMD_TIMEOUT:
		/*
		 * On a timeout the HBA will already have reset the
		 * device, so just fall through and retry the command.
		 */
		break;

	case CMD_INCOMPLETE:
		action = dcd_check_error(un, bp);
		DCD_DO_ERRSTATS(un, dcd_transerrs);
		if (action == COMMAND_HARD_ERROR) {
			(void) dcd_reset_disk(un, pkt);
		}
		break;

	case CMD_FATAL:
		/*
		 * Something drastic has gone wrong
		 */
		break;
	case CMD_DMA_DERR:
	case CMD_DATA_OVR:
		/* FALLTHROUGH */

	default:
		/*
		 * the target may still be running the command,
		 * so we should try and reset that target.
		 */
		DCD_DO_ERRSTATS(un, dcd_transerrs);
		if ((pkt->pkt_reason != CMD_RESET) &&
		    (pkt->pkt_reason != CMD_ABORTED)) {
			(void) dcd_reset_disk(un, pkt);
		}
		break;
	}

	/*
	 * If pkt_reason is CMD_RESET/ABORTED, chances are that this pkt got
	 * reset/aborted because another disk on this bus caused it.
	 * The disk that caused it, should get CMD_TIMEOUT with pkt_statistics
	 * of STAT_TIMEOUT/STAT_DEV_RESET
	 */
	if ((pkt->pkt_reason == CMD_RESET) ||(pkt->pkt_reason == CMD_ABORTED)) {
		/* To be written : XXX */
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "Command aborted\n");
	}

	/*
	 * Diagnostic special commands fail immediately; everything else
	 * gets retried while it looks like a soft error and the retry
	 * budget (dcd_retry_count) is not exhausted.
	 */
	if (bp == un->un_sbufp && (pkt->pkt_flags & FLAG_DIAGNOSE)) {
		rval = COMMAND_DONE_ERROR;
	} else {
		if ((rval == COMMAND_DONE_ERROR) &&
		    (action == COMMAND_SOFT_ERROR) &&
		    ((int)PKT_GET_RETRY_CNT(pkt) < dcd_retry_count)) {
			PKT_INCR_RETRY_CNT(pkt, 1);
			rval = QUE_COMMAND;
		}
	}

	if (pkt->pkt_reason == CMD_INCOMPLETE && rval == COMMAND_DONE_ERROR) {
		/*
		 * Looks like someone turned off this shoebox.
		 */
		if (un->un_state != DCD_STATE_OFFLINE) {
			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
			    (const char *) notresp);
			New_state(un, DCD_STATE_OFFLINE);
		}
	} else if (pkt->pkt_reason == CMD_FATAL) {
		/*
		 * Suppressing the following message for the time being
		 * dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
		 *	(const char *) notresp);
		 */
		/* Burn the retry budget so this command is not requeued */
		PKT_INCR_RETRY_CNT(pkt, 6);
		rval = COMMAND_DONE_ERROR;
		New_state(un, DCD_STATE_FATAL);
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt reason;
		 * with TQ, many(retryable) failures may occur in a row
		 */
		int in_panic = ddi_in_panic();
		if (!in_panic || (rval == COMMAND_DONE_ERROR)) {
			if (((pkt->pkt_reason != un->un_last_pkt_reason) &&
			    (pkt->pkt_reason != CMD_RESET)) ||
			    (rval == COMMAND_DONE_ERROR) ||
			    (dcd_error_level == DCD_ERR_ALL)) {
				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
				    fail, dcd_rname(pkt->pkt_reason),
				    (rval == COMMAND_DONE_ERROR) ?
				    "giving up": "retrying command");
				DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
				    "retrycount=%x\n",
				    PKT_GET_RETRY_CNT(pkt));
			}
		}
	}
	/* NOTE(review): this label appears unused — confirm before removal */
error:
	return (rval);
}
2787 2787
static int
dcd_check_error(struct dcd_disk *un, struct buf *bp)
{
	struct diskhd *dp = &un->un_utab;
	struct dcd_pkt *pkt = BP_PKT(bp);
	/* Returns 0, COMMAND_SOFT_ERROR, or COMMAND_HARD_ERROR */
	int rval = 0;
	unsigned char status;
	unsigned char error;

	TRACE_0(TR_FAC_DADA, TR_DCD_CHECK_ERROR_START, "dcd_check_error_start");
	ASSERT(mutex_owned(DCD_MUTEX));

	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Pkt: 0x%p dp: 0x%p\n", (void *)pkt, (void *)dp);

	/*
	 * Here we need to check status first and then if error is indicated
	 * Then the error register.
	 */

	/* scbp[0] holds the ATA status register, scbp[1] the error register */
	status = (pkt->pkt_scbp)[0];
	if ((status & STATUS_ATA_DWF) == STATUS_ATA_DWF) {
		/*
		 * There has been a Device Fault - reason for such error
		 * is vendor specific
		 * Action to be taken is - Indicate error and reset device.
		 */

		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, "Device Fault\n");
		rval = COMMAND_HARD_ERROR;
	} else if ((status & STATUS_ATA_CORR) == STATUS_ATA_CORR) {

		/*
		 * The sector read or written is marginal and hence ECC
		 * Correction has been applied. Indicate to repair
		 * Here we need to probably re-assign based on the badblock
		 * mapping.
		 */

		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
		    "Soft Error on block %x\n",
		    ((struct dcd_cmd *)pkt->pkt_cdbp)->sector_num.lba_num);
		rval = COMMAND_SOFT_ERROR;
	} else if ((status & STATUS_ATA_ERR) == STATUS_ATA_ERR) {
		error = pkt->pkt_scbp[1];

		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
		    "Command:0x%x,Error:0x%x,Status:0x%x\n",
		    GETATACMD((struct dcd_cmd *)pkt->pkt_cdbp),
		    error, status);
		if ((error & ERR_AMNF) == ERR_AMNF) {
			/* Address mark not found */
			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
			    "Address Mark Not Found");
		} else if ((error & ERR_TKONF) == ERR_TKONF) {
			/* Track 0 Not found */
			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
			    "Track 0 Not found \n");
		} else if ((error & ERR_IDNF) == ERR_IDNF) {
			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
			    " ID not found \n");
		} else if ((error & ERR_UNC) == ERR_UNC) {
			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
			    "Uncorrectable data Error: Block %x\n",
			    ((struct dcd_cmd *)pkt->pkt_cdbp)->
			    sector_num.lba_num);
		} else if ((error & ERR_BBK) == ERR_BBK) {
			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
			    "Bad block detected: Block %x\n",
			    ((struct dcd_cmd *)pkt->pkt_cdbp)->
			    sector_num.lba_num);
		} else if ((error & ERR_ABORT) == ERR_ABORT) {
			/* Aborted Command */
			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
			    " Aborted Command \n");
		}
		/*
		 * Return the soft error so that the command
		 * will be retried.
		 */
		rval = COMMAND_SOFT_ERROR;
	}

	TRACE_0(TR_FAC_DADA, TR_DCD_CHECK_ERROR_END, "dcd_check_error_end");
	return (rval);
}
2874 2874
2875 2875
2876 2876 /*
2877 2877 * System Crash Dump routine
2878 2878 */
2879 2879
2880 2880 #define NDUMP_RETRIES 5
2881 2881
static int
dcddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	struct dcd_pkt *pkt;
	int i;
	struct buf local, *bp;
	int err;
	unsigned char com;
	diskaddr_t p_lblksrt;
	diskaddr_t lblocks;

	/* Declares and resolves 'un' (and 'part') from the soft state */
	GET_SOFT_STATE(dev);
#ifdef lint
	part = part;
#endif /* lint */

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))

	if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL)
		return (ENXIO);

	if (cmlb_partinfo(un->un_dklbhandle, DCDPART(dev),
	    &lblocks, &p_lblksrt, NULL, NULL, 0))
		return (ENXIO);

	/* Refuse dumps that would run off the end of the partition */
	if (blkno+nblk > lblocks) {
		return (EINVAL);
	}


	if ((un->un_state == DCD_STATE_SUSPENDED) ||
	    (un->un_state == DCD_STATE_PM_SUSPENDED)) {
		if (pm_raise_power(DCD_DEVINFO, 0,
		    DCD_DEVICE_ACTIVE) != DDI_SUCCESS) {
			return (EIO);
		}
	}

	/*
	 * When cpr calls dcddump, we know that dad is in a
	 * good state, so no bus reset is required
	 */
	un->un_throttle = 0;

	if ((un->un_state != DCD_STATE_SUSPENDED) &&
	    (un->un_state != DCD_STATE_DUMPING)) {

		New_state(un, DCD_STATE_DUMPING);

		/*
		 * Reset the bus. I'd like to not have to do this,
		 * but this is the safest thing to do...
		 */

		if (dcd_reset(ROUTE, RESET_ALL) == 0) {
			return (EIO);
		}

	}

	/* Convert partition-relative block to absolute */
	blkno += p_lblksrt;

	/*
	 * It should be safe to call the allocator here without
	 * worrying about being locked for DVMA mapping because
	 * the address we're passed is already a DVMA mapping
	 *
	 * We are also not going to worry about semaphore ownership
	 * in the dump buffer. Dumping is single threaded at present.
	 */

	bp = &local;
	bzero((caddr_t)bp, sizeof (*bp));
	bp->b_flags = B_BUSY;
	bp->b_un.b_addr = addr;
	bp->b_bcount = nblk << DEV_BSHIFT;
	bp->b_resid = 0;

	/* Packet allocation: retry up to NDUMP_RETRIES times */
	for (i = 0; i < NDUMP_RETRIES; i++) {
		bp->b_flags &= ~B_ERROR;
		if ((pkt = dcd_init_pkt(ROUTE, NULL, bp,
		    (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
		    PKT_CONSISTENT, NULL_FUNC, NULL)) != NULL) {
			break;
		}
		if (i == 0) {
			if (bp->b_flags & B_ERROR) {
				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
				    "no resources for dumping; "
				    "error code: 0x%x, retrying",
				    geterror(bp));
			} else {
				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
				    "no resources for dumping; retrying");
			}
		} else if (i != (NDUMP_RETRIES - 1)) {
			if (bp->b_flags & B_ERROR) {
				dcd_log(DCD_DEVINFO, dcd_label, CE_CONT, "no "
				    "resources for dumping; error code: 0x%x, "
				    "retrying\n", geterror(bp));
			}
		} else {
			if (bp->b_flags & B_ERROR) {
				dcd_log(DCD_DEVINFO, dcd_label, CE_CONT,
				    "no resources for dumping; "
				    "error code: 0x%x, retries failed, "
				    "giving up.\n", geterror(bp));
			} else {
				dcd_log(DCD_DEVINFO, dcd_label, CE_CONT,
				    "no resources for dumping; "
				    "retries failed, giving up.\n");
			}
			return (EIO);
		}
		delay(10);
	}
	/* Pick the write opcode: DMA if supported, else multiple/single */
	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
		com = ATA_WRITE_DMA;
	} else {
		if (un->un_dp->options & BLOCK_MODE)
			com = ATA_WRITE_MULTIPLE;
		else
			com = ATA_WRITE;
	}

	makecommand(pkt, 0, com, blkno, ADD_LBA_MODE,
	    (int)nblk*un->un_secsize, DATA_WRITE, 0);

	/* Polled I/O (no interrupts at dump time), with reset-and-retry */
	for (err = EIO, i = 0; i < NDUMP_RETRIES && err == EIO; i++) {

		if (dcd_poll(pkt) == 0) {
			switch (SCBP_C(pkt)) {
			case STATUS_GOOD:
				if (pkt->pkt_resid == 0) {
					err = 0;
				}
				break;
			case STATUS_ATA_BUSY:
				(void) dcd_reset(ROUTE, RESET_TARGET);
				break;
			default:
				mutex_enter(DCD_MUTEX);
				(void) dcd_reset_disk(un, pkt);
				mutex_exit(DCD_MUTEX);
				break;
			}
		} else if (i > NDUMP_RETRIES/2) {
			/* Half the retries exhausted: escalate to full reset */
			(void) dcd_reset(ROUTE, RESET_ALL);
		}

	}
	dcd_destroy_pkt(pkt);
	return (err);
}
3036 3036
3037 3037 /*
3038 3038 * This routine implements the ioctl calls. It is called
3039 3039 * from the device switch at normal priority.
3040 3040 */
3041 3041 /* ARGSUSED3 */
static int
dcdioctl(dev_t dev, int cmd, intptr_t arg, int flag,
    cred_t *cred_p, int *rval_p)
{
	/* Scratch buffer used to build/copy the small fixed-size payloads. */
	auto int32_t data[512 / (sizeof (int32_t))];
	struct dk_cinfo *info;
	struct dk_minfo media_info;
	struct udcd_cmd *scmd;
	int i, err;
	enum uio_seg uioseg = 0;
	enum dkio_state state = 0;
#ifdef _MULTI_DATAMODEL
	struct dadkio_rwcmd rwcmd;
#endif
	struct dadkio_rwcmd32 rwcmd32;
	struct dcd_cmd dcdcmd;

	/*
	 * Binds 'un' (and 'part') to the unit's soft state; presumably
	 * returns early if the unit is gone -- macro body not visible here.
	 */
	GET_SOFT_STATE(dev);
#ifdef lint
	part = part;
	state = state;
	uioseg = uioseg;
#endif /* lint */

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "dcd_ioctl : cmd %x, arg %lx\n", cmd, arg);

	bzero((caddr_t)data, sizeof (data));

	switch (cmd) {

#ifdef DCDDEBUG
	/*
	 * Following ioctl are for testing RESET/ABORTS
	 */
#define	DKIOCRESET	(DKIOC|14)
#define	DKIOCABORT	(DKIOC|15)

	case DKIOCRESET:
		if (ddi_copyin((caddr_t)arg, (caddr_t)data, 4, flag))
			return (EFAULT);
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "DKIOCRESET: data = 0x%x\n", data[0]);
		if (dcd_reset(ROUTE, data[0])) {
			return (0);
		} else {
			return (EIO);
		}
	case DKIOCABORT:
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "DKIOCABORT:\n");
		if (dcd_abort(ROUTE, (struct dcd_pkt *)0)) {
			return (0);
		} else {
			return (EIO);
		}
#endif

	case DKIOCINFO:
		/*
		 * Controller Information
		 */
		info = (struct dk_cinfo *)data;

		mutex_enter(DCD_MUTEX);
		switch (un->un_dp->ctype) {
		default:
			info->dki_ctype = DKC_DIRECT;
			break;
		}
		mutex_exit(DCD_MUTEX);
		info->dki_cnum = ddi_get_instance(ddi_get_parent(DCD_DEVINFO));
		(void) strcpy(info->dki_cname,
		    ddi_get_name(ddi_get_parent(DCD_DEVINFO)));
		/*
		 * Unit Information
		 */
		info->dki_unit = ddi_get_instance(DCD_DEVINFO);
		info->dki_slave = (Tgt(DCD_DCD_DEVP)<<3);
		(void) strcpy(info->dki_dname, ddi_driver_name(DCD_DEVINFO));
		info->dki_flags = DKI_FMTVOL;
		info->dki_partition = DCDPART(dev);

		/*
		 * Max Transfer size of this device in blocks
		 */
		info->dki_maxtransfer = un->un_max_xfer_size / DEV_BSIZE;

		/*
		 * We can't get from here to there yet
		 */
		info->dki_addr = 0;
		info->dki_space = 0;
		info->dki_prio = 0;
		info->dki_vec = 0;

		i = sizeof (struct dk_cinfo);
		if (ddi_copyout((caddr_t)data, (caddr_t)arg, i, flag))
			return (EFAULT);
		else
			return (0);

	case DKIOCGMEDIAINFO:
		/*
		 * As dad target driver is used for IDE disks only
		 * Can keep the return value hardcoded to FIXED_DISK
		 */
		media_info.dki_media_type = DK_FIXED_DISK;

		mutex_enter(DCD_MUTEX);
		media_info.dki_lbsize = un->un_lbasize;
		media_info.dki_capacity = un->un_diskcapacity;
		mutex_exit(DCD_MUTEX);

		if (ddi_copyout(&media_info, (caddr_t)arg,
		    sizeof (struct dk_minfo), flag))
			return (EFAULT);
		else
			return (0);

	case DKIOCGGEOM:
	case DKIOCGVTOC:
	case DKIOCGETEFI:

		/*
		 * Probe the unit first when the driver is idle; the result
		 * (label/geometry) then comes from cmlb.
		 */
		mutex_enter(DCD_MUTEX);
		if (un->un_ncmds == 0) {
			if ((err = dcd_unit_ready(dev)) != 0) {
				mutex_exit(DCD_MUTEX);
				return (err);
			}
		}

		mutex_exit(DCD_MUTEX);
		err = cmlb_ioctl(un->un_dklbhandle, dev, cmd,
		    arg, flag, cred_p, rval_p, 0);
		return (err);

	case DKIOCGAPART:
	case DKIOCSAPART:
	case DKIOCSGEOM:
	case DKIOCSVTOC:
	case DKIOCSETEFI:
	case DKIOCPARTITION:
	case DKIOCPARTINFO:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:

		/* Label and partition ioctls are delegated to cmlb. */
		err = cmlb_ioctl(un->un_dklbhandle, dev, cmd,
		    arg, flag, cred_p, rval_p, 0);
		return (err);

	case DIOCTL_RWCMD:
		/* Raw read/write command; privileged callers only. */
		if (drv_priv(cred_p) != 0) {
			return (EPERM);
		}

#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(flag & FMODELS)) {
		case DDI_MODEL_NONE:
			if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd,
			    sizeof (struct dadkio_rwcmd), flag)) {
				return (EFAULT);
			}
			rwcmd32.cmd = rwcmd.cmd;
			rwcmd32.flags = rwcmd.flags;
			rwcmd32.blkaddr = rwcmd.blkaddr;
			rwcmd32.buflen = rwcmd.buflen;
			rwcmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmd.bufaddr;
			break;
		case DDI_MODEL_ILP32:
			if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd32,
			    sizeof (struct dadkio_rwcmd32), flag)) {
				return (EFAULT);
			}
			break;
		}
#else
		if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd32,
		    sizeof (struct dadkio_rwcmd32), flag)) {
			return (EFAULT);
		}
#endif
		mutex_enter(DCD_MUTEX);

		uioseg = UIO_SYSSPACE;
		scmd = (struct udcd_cmd *)data;
		scmd->udcd_cmd = &dcdcmd;
		/*
		 * Convert the dadkio_rwcmd structure to udcd_cmd so that
		 * it can take the normal path to get the io done
		 */
		if (rwcmd32.cmd == DADKIO_RWCMD_READ) {
			if ((un->un_dp->options & DMA_SUPPORTTED) ==
			    DMA_SUPPORTTED)
				scmd->udcd_cmd->cmd = ATA_READ_DMA;
			else
				scmd->udcd_cmd->cmd = ATA_READ;
			scmd->udcd_cmd->address_mode = ADD_LBA_MODE;
			scmd->udcd_cmd->direction = DATA_READ;
			scmd->udcd_flags |= UDCD_READ|UDCD_SILENT;
		} else if (rwcmd32.cmd == DADKIO_RWCMD_WRITE) {
			if ((un->un_dp->options & DMA_SUPPORTTED) ==
			    DMA_SUPPORTTED)
				scmd->udcd_cmd->cmd = ATA_WRITE_DMA;
			else
				scmd->udcd_cmd->cmd = ATA_WRITE;
			scmd->udcd_cmd->direction = DATA_WRITE;
			scmd->udcd_flags |= UDCD_WRITE|UDCD_SILENT;
		} else {
			mutex_exit(DCD_MUTEX);
			return (EINVAL);
		}

		scmd->udcd_cmd->address_mode = ADD_LBA_MODE;
		scmd->udcd_cmd->features = 0;
		scmd->udcd_cmd->size = rwcmd32.buflen;
		scmd->udcd_cmd->sector_num.lba_num = rwcmd32.blkaddr;
		scmd->udcd_bufaddr = (caddr_t)(uintptr_t)rwcmd32.bufaddr;
		scmd->udcd_buflen = rwcmd32.buflen;
		scmd->udcd_timeout = (ushort_t)dcd_io_time;
		scmd->udcd_resid = 0ULL;
		scmd->udcd_status = 0;
		scmd->udcd_error_reg = 0;
		scmd->udcd_status_reg = 0;

		mutex_exit(DCD_MUTEX);

		i = dcdioctl_cmd(dev, scmd, UIO_SYSSPACE, UIO_USERSPACE);
		mutex_enter(DCD_MUTEX);
		/*
		 * After return convert the status from scmd to
		 * dadkio_status
		 */
		(void) dcd_translate(&(rwcmd32.status), scmd);
		rwcmd32.status.resid = scmd->udcd_resid;
		mutex_exit(DCD_MUTEX);

#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(flag & FMODELS)) {
		case DDI_MODEL_NONE: {
			int counter;
			rwcmd.status.status = rwcmd32.status.status;
			rwcmd.status.resid = rwcmd32.status.resid;
			rwcmd.status.failed_blk_is_valid =
			    rwcmd32.status.failed_blk_is_valid;
			rwcmd.status.failed_blk = rwcmd32.status.failed_blk;
			rwcmd.status.fru_code_is_valid =
			    rwcmd32.status.fru_code_is_valid;
			rwcmd.status.fru_code = rwcmd32.status.fru_code;
			for (counter = 0;
			    counter < DADKIO_ERROR_INFO_LEN; counter++)
				rwcmd.status.add_error_info[counter] =
				    rwcmd32.status.add_error_info[counter];
		}
			/* Copy out the result back to the user program */
			if (ddi_copyout((caddr_t)&rwcmd, (caddr_t)arg,
			    sizeof (struct dadkio_rwcmd), flag)) {
				if (i != 0) {
					i = EFAULT;
				}
			}
			break;
		case DDI_MODEL_ILP32:
			/* Copy out the result back to the user program */
			if (ddi_copyout((caddr_t)&rwcmd32, (caddr_t)arg,
			    sizeof (struct dadkio_rwcmd32), flag)) {
				if (i != 0) {
					i = EFAULT;
				}
			}
			break;
		}
#else
		/* Copy out the result back to the user program */
		if (ddi_copyout((caddr_t)&rwcmd32, (caddr_t)arg,
		    sizeof (struct dadkio_rwcmd32), flag)) {
			if (i != 0)
				i = EFAULT;
		}
#endif
		return (i);

	case UDCDCMD: {
#ifdef _MULTI_DATAMODEL
		/*
		 * For use when a 32 bit app makes a call into a
		 * 64 bit ioctl
		 */
		struct udcd_cmd32 udcd_cmd_32_for_64;
		struct udcd_cmd32 *ucmd32 = &udcd_cmd_32_for_64;
		model_t model;
#endif /* _MULTI_DATAMODEL */

		if (drv_priv(cred_p) != 0) {
			return (EPERM);
		}

		scmd = (struct udcd_cmd *)data;

#ifdef _MULTI_DATAMODEL
		switch (model = ddi_model_convert_from(flag & FMODELS)) {
		case DDI_MODEL_ILP32:
			if (ddi_copyin((caddr_t)arg, ucmd32,
			    sizeof (struct udcd_cmd32), flag)) {
				return (EFAULT);
			}
			/*
			 * Convert the ILP32 uscsi data from the
			 * application to LP64 for internal use.
			 */
			udcd_cmd32toudcd_cmd(ucmd32, scmd);
			break;
		case DDI_MODEL_NONE:
			if (ddi_copyin((caddr_t)arg, scmd, sizeof (*scmd),
			    flag)) {
				return (EFAULT);
			}
			break;
		}
#else /* ! _MULTI_DATAMODEL */
		if (ddi_copyin((caddr_t)arg, (caddr_t)scmd,
		    sizeof (*scmd), flag)) {
			return (EFAULT);
		}
#endif /* ! _MULTI_DATAMODEL */

		scmd->udcd_flags &= ~UDCD_NOINTR;
		uioseg = (flag & FKIOCTL)? UIO_SYSSPACE: UIO_USERSPACE;

		i = dcdioctl_cmd(dev, scmd, uioseg, uioseg);
#ifdef _MULTI_DATAMODEL
		switch (model) {
		case DDI_MODEL_ILP32:
			/*
			 * Convert back to ILP32 before copyout to the
			 * application
			 */
			udcd_cmdtoudcd_cmd32(scmd, ucmd32);
			if (ddi_copyout(ucmd32, (caddr_t)arg,
			    sizeof (*ucmd32), flag)) {
				if (i != 0)
					i = EFAULT;
			}
			break;
		case DDI_MODEL_NONE:
			if (ddi_copyout(scmd, (caddr_t)arg, sizeof (*scmd),
			    flag)) {
				if (i != 0)
					i = EFAULT;
			}
			break;
		}
#else /* ! _MULTI_DATAMODE */
		if (ddi_copyout((caddr_t)scmd, (caddr_t)arg,
		    sizeof (*scmd), flag)) {
			if (i != 0)
				i = EFAULT;
		}
#endif
		return (i);
	}
	case DKIOCFLUSHWRITECACHE: {
		struct dk_callback *dkc = (struct dk_callback *)arg;
		struct dcd_pkt *pkt;
		struct buf *bp;
		int is_sync = 1;	/* wait for the flush unless a callback is set up */

		mutex_enter(DCD_MUTEX);
		if (un->un_flush_not_supported ||
		    ! un->un_write_cache_enabled) {
			i = un->un_flush_not_supported ? ENOTSUP : 0;
			mutex_exit(DCD_MUTEX);
			/*
			 * If a callback was requested: a callback will
			 * always be done if the caller saw the
			 * DKIOCFLUSHWRITECACHE ioctl return 0, and
			 * never done if the caller saw the ioctl return
			 * an error.
			 */
			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				(*dkc->dkc_callback)(dkc->dkc_cookie, i);
				/*
				 * Did callback and reported error.
				 * Since we did a callback, ioctl
				 * should return 0.
				 */
				i = 0;
			}
			return (i);
		}

		/*
		 * Get the special buffer
		 */
		while (un->un_sbuf_busy) {
			cv_wait(&un->un_sbuf_cv, DCD_MUTEX);
		}
		un->un_sbuf_busy = 1;
		bp = un->un_sbufp;
		mutex_exit(DCD_MUTEX);

		pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
		    NULL, (uint32_t)sizeof (struct dcd_cmd),
		    2, PP_LEN, PKT_CONSISTENT, SLEEP_FUNC, (caddr_t)un);
		ASSERT(pkt != NULL);

		makecommand(pkt, un->un_cmd_flags | FLAG_SILENT,
		    ATA_FLUSH_CACHE, 0, ADD_LBA_MODE, 0, NO_DATA_XFER, 0);

		pkt->pkt_comp = dcdintr;
		pkt->pkt_time = DCD_FLUSH_TIME;
		PKT_SET_BP(pkt, bp);

		/* Build a zero-length command buf around the flush packet. */
		bp->av_back = (struct buf *)pkt;
		bp->b_forw = NULL;
		bp->b_flags = B_BUSY;
		bp->b_error = 0;
		bp->b_edev = dev;
		bp->b_dev = cmpdev(dev);
		bp->b_bcount = 0;
		bp->b_blkno = 0;
		bp->b_un.b_addr = 0;
		bp->b_iodone = NULL;
		bp->b_list = NULL;
		bp->b_private = NULL;

		/*
		 * Kernel callers may request an async flush: copy the
		 * callback so dcdflushdone() can invoke and free it.
		 */
		if ((flag & FKIOCTL) && dkc != NULL &&
		    dkc->dkc_callback != NULL) {
			struct dk_callback *dkc2 = (struct dk_callback *)
			    kmem_zalloc(sizeof (*dkc2), KM_SLEEP);
			bcopy(dkc, dkc2, sizeof (*dkc2));

			bp->b_private = dkc2;
			bp->b_iodone = dcdflushdone;
			is_sync = 0;
		}

		(void) dcdstrategy(bp);

		i = 0;
		if (is_sync) {
			i = biowait(bp);
			(void) dcdflushdone(bp);
		}

		return (i);
	}
	default:
		break;
	}
	return (ENOTTY);
}
3495 3495
3496 3496
3497 3497 static int
3498 3498 dcdflushdone(struct buf *bp)
3499 3499 {
3500 3500 struct dcd_disk *un = ddi_get_soft_state(dcd_state,
3501 3501 DCDUNIT(bp->b_edev));
3502 3502 struct dcd_pkt *pkt = BP_PKT(bp);
3503 3503 struct dk_callback *dkc = bp->b_private;
3504 3504
3505 3505 ASSERT(un != NULL);
3506 3506 ASSERT(bp == un->un_sbufp);
3507 3507 ASSERT(pkt != NULL);
3508 3508
3509 3509 dcd_destroy_pkt(pkt);
3510 3510 bp->av_back = NO_PKT_ALLOCATED;
3511 3511
3512 3512 if (dkc != NULL) {
3513 3513 ASSERT(bp->b_iodone != NULL);
3514 3514 (*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp));
3515 3515 kmem_free(dkc, sizeof (*dkc));
3516 3516 bp->b_iodone = NULL;
3517 3517 bp->b_private = NULL;
3518 3518 }
3519 3519
3520 3520 /*
3521 3521 * Tell anybody who cares that the buffer is now free
3522 3522 */
3523 3523 mutex_enter(DCD_MUTEX);
3524 3524 un->un_sbuf_busy = 0;
3525 3525 cv_signal(&un->un_sbuf_cv);
3526 3526 mutex_exit(DCD_MUTEX);
3527 3527 return (0);
3528 3528 }
3529 3529
3530 3530 /*
3531 3531 * dcdrunout:
3532 3532 * the callback function for resource allocation
3533 3533 *
3534 3534 * XXX it would be preferable that dcdrunout() scans the whole
3535 3535 * list for possible candidates for dcdstart(); this avoids
3536 3536 * that a bp at the head of the list whose request cannot be
3537 3537 * satisfied is retried again and again
3538 3538 */
3539 3539 /*ARGSUSED*/
3540 3540 static int
3541 3541 dcdrunout(caddr_t arg)
3542 3542 {
3543 3543 int serviced;
3544 3544 struct dcd_disk *un;
3545 3545 struct diskhd *dp;
3546 3546
3547 3547 TRACE_1(TR_FAC_DADA, TR_DCDRUNOUT_START, "dcdrunout_start: arg 0x%p",
3548 3548 arg);
3549 3549 serviced = 1;
3550 3550
3551 3551 un = (struct dcd_disk *)arg;
3552 3552 dp = &un->un_utab;
3553 3553
3554 3554 /*
3555 3555 * We now support passing a structure to the callback
3556 3556 * routine.
3557 3557 */
3558 3558 ASSERT(un != NULL);
3559 3559 mutex_enter(DCD_MUTEX);
3560 3560 if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) {
3561 3561 dcdstart(un);
3562 3562 }
3563 3563 if (un->un_state == DCD_STATE_RWAIT) {
3564 3564 serviced = 0;
3565 3565 }
3566 3566 mutex_exit(DCD_MUTEX);
3567 3567 TRACE_1(TR_FAC_DADA, TR_DCDRUNOUT_END,
3568 3568 "dcdrunout_end: serviced %d", serviced);
3569 3569 return (serviced);
3570 3570 }
3571 3571
3572 3572
3573 3573 /*
3574 3574 * This routine called to see whether unit is (still) there. Must not
3575 3575 * be called when un->un_sbufp is in use, and must not be called with
3576 3576 * an unattached disk. Soft state of disk is restored to what it was
3577 3577 * upon entry- up to caller to set the correct state.
3578 3578 *
3579 3579 * We enter with the disk mutex held.
3580 3580 */
3581 3581
3582 3582 /* ARGSUSED0 */
/*
 * dcd_unit_ready:
 *	Probe whether the unit is (still) present.  Currently a stub that
 *	always reports success; the command templates below are compiled
 *	out for lint only.  GET_SOFT_STATE presumably bails out early if
 *	the soft state for 'dev' is gone -- macro body not visible here.
 */
static int
dcd_unit_ready(dev_t dev)
{
#ifndef lint
	auto struct udcd_cmd dcmd, *com = &dcmd;
	auto struct dcd_cmd cmdblk;
#endif
	int error;
#ifndef lint
	GET_SOFT_STATE(dev);
#endif

	/*
	 * Now that we protect the special buffer with
	 * a mutex, we could probably do a mutex_tryenter
	 * on it here and return failure if it were held...
	 */

	/* No actual probe is issued; always report ready. */
	error = 0;
	return (error);
}
3604 3604
3605 3605 /* ARGSUSED0 */
/*
 * dcdioctl_cmd:
 *	Run a user-supplied ATA command (struct udcd_cmd) through the
 *	driver's normal I/O path using the per-unit special buffer.
 *	'cdbspace' says where in->udcd_cmd lives (kernel or user);
 *	'dataspace' says where the data buffer lives.  Returns 0 or errno.
 *	On return, in->udcd_resid and in->udcd_status_reg are updated from
 *	the completed packet.
 */
int
dcdioctl_cmd(dev_t devp, struct udcd_cmd *in, enum uio_seg cdbspace,
    enum uio_seg dataspace)
{

	struct buf *bp;
	struct udcd_cmd *scmd;
	struct dcd_pkt *pkt;
	int err, rw;
	caddr_t cdb;
	int flags = 0;

	GET_SOFT_STATE(devp);

#ifdef lint
	part = part;
#endif

	/*
	 * Is this a request to reset the bus?
	 * If so, do the reset here and return immediately.
	 */

	if (in->udcd_flags & UDCD_RESET) {
		int flag = RESET_TARGET;
		err = dcd_reset(ROUTE, flag) ? 0: EIO;
		return (err);
	}

	scmd = in;


	/* Do some sanity checks */
	if (scmd->udcd_buflen <= 0) {
		/* A data-transfer command must supply a buffer. */
		if (scmd->udcd_flags & (UDCD_READ | UDCD_WRITE)) {
			return (EINVAL);
		} else {
			scmd->udcd_buflen = 0;
		}
	}

	/* Make a copy of the dcd_cmd passed */
	cdb = kmem_zalloc(sizeof (struct dcd_cmd), KM_SLEEP);
	if (cdbspace == UIO_SYSSPACE) {
		flags |= FKIOCTL;
	}

	if (ddi_copyin((void *)scmd->udcd_cmd, cdb, sizeof (struct dcd_cmd),
	    flags)) {
		kmem_free(cdb, sizeof (struct dcd_cmd));
		return (EFAULT);
	}
	/* Work on a private copy of the udcd_cmd; point it at the copied cdb. */
	scmd = (struct udcd_cmd *)kmem_alloc(sizeof (*scmd), KM_SLEEP);
	bcopy((caddr_t)in, (caddr_t)scmd, sizeof (*scmd));
	scmd->udcd_cmd = (struct dcd_cmd *)cdb;
	rw = (scmd->udcd_flags & UDCD_READ) ? B_READ: B_WRITE;


	/*
	 * Get the special buffer
	 */

	mutex_enter(DCD_MUTEX);
	while (un->un_sbuf_busy) {
		/* cv_wait_sig() returns 0 when interrupted by a signal. */
		if (cv_wait_sig(&un->un_sbuf_cv, DCD_MUTEX) == 0) {
			kmem_free(scmd->udcd_cmd, sizeof (struct dcd_cmd));
			kmem_free((caddr_t)scmd, sizeof (*scmd));
			mutex_exit(DCD_MUTEX);
			return (EINTR);
		}
	}

	un->un_sbuf_busy = 1;
	bp = un->un_sbufp;
	mutex_exit(DCD_MUTEX);


	/*
	 * If we are going to do actual I/O, let physio do all the
	 * things
	 */
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "dcdioctl_cmd : buflen %x\n", scmd->udcd_buflen);

	if (scmd->udcd_buflen) {
		auto struct iovec aiov;
		auto struct uio auio;
		struct uio *uio = &auio;

		bzero((caddr_t)&auio, sizeof (struct uio));
		bzero((caddr_t)&aiov, sizeof (struct iovec));

		aiov.iov_base = scmd->udcd_bufaddr;
		aiov.iov_len = scmd->udcd_buflen;

		uio->uio_iov = &aiov;
		uio->uio_iovcnt = 1;
		uio->uio_resid = scmd->udcd_buflen;
		uio->uio_segflg = dataspace;

		/*
		 * Let physio do the rest...
		 */
		bp->av_back = NO_PKT_ALLOCATED;
		bp->b_forw = (struct buf *)scmd;
		err = physio(dcdstrategy, bp, devp, rw, dcdudcdmin, uio);
	} else {
		/*
		 * We have to mimic what physio would do here.
		 */
		bp->av_back = NO_PKT_ALLOCATED;
		bp->b_forw = (struct buf *)scmd;
		bp->b_flags = B_BUSY | rw;
		bp->b_edev = devp;
		bp->b_dev = cmpdev(devp);
		bp->b_bcount = bp->b_blkno = 0;
		(void) dcdstrategy(bp);
		err = biowait(bp);
	}

done:
	if ((pkt = BP_PKT(bp)) != NULL) {
		bp->av_back = NO_PKT_ALLOCATED;
		/* we need to update the completion status of udcd command */
		in->udcd_resid = bp->b_resid;
		in->udcd_status_reg = SCBP_C(pkt);
		/* XXX: we need to give error_reg also */
		dcd_destroy_pkt(pkt);
	}
	/*
	 * Tell anybody who cares that the buffer is now free
	 */
	mutex_enter(DCD_MUTEX);
	un->un_sbuf_busy = 0;
	cv_signal(&un->un_sbuf_cv);
	mutex_exit(DCD_MUTEX);

	kmem_free(scmd->udcd_cmd, sizeof (struct dcd_cmd));
	kmem_free((caddr_t)scmd, sizeof (*scmd));
	return (err);
}
3747 3747
/*
 * dcdudcdmin:
 *	minphys routine handed to physio() by dcdioctl_cmd().  Deliberately
 *	empty: it imposes no additional limit on the transfer size.
 */
static void
dcdudcdmin(struct buf *bp)
{

#ifdef lint
	bp = bp;
#endif

}
3757 3757
3758 3758 /*
3759 3759 * restart a cmd from timeout() context
3760 3760 *
3761 3761 * the cmd is expected to be in un_utab.b_forw. If this pointer is non-zero
3762 3762 * a restart timeout request has been issued and no new timeouts should
3763 3763 * be requested. b_forw is reset when the cmd eventually completes in
3764 3764 * dcddone_and_mutex_exit()
3765 3765 */
void
dcdrestart(void *arg)
{
	struct dcd_disk *un = (struct dcd_disk *)arg;
	struct buf *bp;
	int status;

	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdrestart\n");

	mutex_enter(DCD_MUTEX);
	/* The command being retried, if any, is parked in b_forw. */
	bp = un->un_utab.b_forw;
	if (bp) {
		un->un_ncmds++;
		DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp);
	}


	if (bp) {
		struct dcd_pkt *pkt = BP_PKT(bp);

		/* Drop the mutex across the HBA call. */
		mutex_exit(DCD_MUTEX);

		pkt->pkt_flags = 0;

		if ((status = dcd_transport(pkt)) != TRAN_ACCEPT) {
			mutex_enter(DCD_MUTEX);
			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
			un->un_ncmds--;
			if (status == TRAN_BUSY) {
				/* XXX : To be checked */
				/*
				 * if (un->un_throttle > 1) {
				 * ASSERT(un->un_ncmds >= 0);
				 * un->un_throttle = un->un_ncmds;
				 * }
				 */
				/*
				 * HBA still busy: rearm ourselves and try
				 * again later (DCD_BSY_TIMEOUT/500 ticks --
				 * units depend on how DCD_BSY_TIMEOUT is
				 * defined; TODO confirm against daddef.h).
				 */
				un->un_reissued_timeid =
				    timeout(dcdrestart, (caddr_t)un,
				    DCD_BSY_TIMEOUT/500);
				mutex_exit(DCD_MUTEX);
				return;
			}
			/* Hard transport failure: fail the command with EIO. */
			DCD_DO_ERRSTATS(un, dcd_transerrs);
			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
			    "dcdrestart transport failed (%x)\n", status);
			bp->b_resid = bp->b_bcount;
			SET_BP_ERROR(bp, EIO);

			DCD_DO_KSTATS(un, kstat_waitq_exit, bp);
			un->un_reissued_timeid = 0L;
			dcddone_and_mutex_exit(un, bp);
			return;
		}
		mutex_enter(DCD_MUTEX);
	}
	/* Command accepted (or nothing to do): clear the pending timeout id. */
	un->un_reissued_timeid = 0L;
	mutex_exit(DCD_MUTEX);
	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdrestart done\n");
}
3825 3825
3826 3826 /*
3827 3827 * This routine gets called to reset the throttle to its saved
 * value whenever we lower the throttle.
3829 3829 */
3830 3830 void
3831 3831 dcd_reset_throttle(caddr_t arg)
3832 3832 {
3833 3833 struct dcd_disk *un = (struct dcd_disk *)arg;
3834 3834 struct diskhd *dp;
3835 3835
3836 3836 mutex_enter(DCD_MUTEX);
3837 3837 dp = &un->un_utab;
3838 3838
3839 3839 /*
3840 3840 * start any commands that didn't start while throttling.
3841 3841 */
3842 3842 if (dp->b_actf && (un->un_ncmds < un->un_throttle) &&
3843 3843 (dp->b_forw == NULL)) {
3844 3844 dcdstart(un);
3845 3845 }
3846 3846 mutex_exit(DCD_MUTEX);
3847 3847 }
3848 3848
3849 3849
3850 3850 /*
3851 3851 * This routine handles the case when a TRAN_BUSY is
3852 3852 * returned by HBA.
3853 3853 *
3854 3854 * If there are some commands already in the transport, the
3855 3855 * bp can be put back on queue and it will
3856 3856 * be retried when the queue is emptied after command
 * completes. But if there is no command in the transport
 * and it still returns busy, we have to retry the command
3859 3859 * after some time like 10ms.
3860 3860 */
3861 3861 /* ARGSUSED0 */
3862 3862 static void
3863 3863 dcd_handle_tran_busy(struct buf *bp, struct diskhd *dp, struct dcd_disk *un)
3864 3864 {
3865 3865 ASSERT(mutex_owned(DCD_MUTEX));
3866 3866
3867 3867
3868 3868 if (dp->b_forw == NULL || dp->b_forw == bp) {
3869 3869 dp->b_forw = bp;
3870 3870 } else if (dp->b_forw != bp) {
3871 3871 bp->b_actf = dp->b_actf;
3872 3872 dp->b_actf = bp;
3873 3873
3874 3874 }
3875 3875 if (!un->un_reissued_timeid) {
3876 3876 un->un_reissued_timeid =
3877 3877 timeout(dcdrestart, (caddr_t)un, DCD_BSY_TIMEOUT/500);
3878 3878 }
3879 3879 }
3880 3880
/*
 * dcd_write_deviceid:
 *	Write un->un_devid out to the on-disk devid sector located via
 *	cmlb_get_devid_block().  Called and returns with DCD_MUTEX held;
 *	the mutex is dropped around cmlb and the actual I/O.
 *	Returns 0 on success or an errno value.
 */
static int
dcd_write_deviceid(struct dcd_disk *un)
{

	int status;
	diskaddr_t blk;
	struct udcd_cmd ucmd;
	struct dcd_cmd cdb;
	struct dk_devid *dkdevid;
	uint_t *ip, chksum;
	int i;
	dev_t dev;

	mutex_exit(DCD_MUTEX);
	if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) {
		mutex_enter(DCD_MUTEX);
		return (EINVAL);
	}
	mutex_enter(DCD_MUTEX);

	/* Allocate the buffer */
	dkdevid = kmem_zalloc(un->un_secsize, KM_SLEEP);

	/* Fill in the revision */
	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	bcopy(un->un_devid, &dkdevid->dkd_devid,
	    ddi_devid_sizeof(un->un_devid));

	/* Calculate the chksum: XOR of all words except the trailing one */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevid);

	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));
	(void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));

	/* Pick the best supported write command. */
	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
		cdb.cmd = ATA_WRITE_DMA;
	} else {
		if (un->un_dp->options & BLOCK_MODE)
			cdb.cmd = ATA_WRITE_MULTIPLE;
		else
			cdb.cmd = ATA_WRITE;
	}
	cdb.size = un->un_secsize;
	cdb.sector_num.lba_num = blk;
	cdb.address_mode = ADD_LBA_MODE;
	cdb.direction = DATA_WRITE;

	ucmd.udcd_flags = UDCD_WRITE;
	ucmd.udcd_cmd = &cdb;
	ucmd.udcd_bufaddr = (caddr_t)dkdevid;
	ucmd.udcd_buflen = un->un_secsize;
	ucmd.udcd_flags |= UDCD_SILENT;
	/* Issue the write through the normal ioctl-command path. */
	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
	mutex_exit(DCD_MUTEX);
	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
	mutex_enter(DCD_MUTEX);

	kmem_free(dkdevid, un->un_secsize);
	return (status);
}
3951 3951
3952 3952 static int
3953 3953 dcd_read_deviceid(struct dcd_disk *un)
3954 3954 {
3955 3955 int status;
3956 3956 diskaddr_t blk;
3957 3957 struct udcd_cmd ucmd;
3958 3958 struct dcd_cmd cdb;
3959 3959 struct dk_devid *dkdevid;
3960 3960 uint_t *ip;
3961 3961 int chksum;
3962 3962 int i, sz;
3963 3963 dev_t dev;
3964 3964
3965 3965 mutex_exit(DCD_MUTEX);
3966 3966 if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) {
3967 3967 mutex_enter(DCD_MUTEX);
3968 3968 return (EINVAL);
3969 3969 }
3970 3970 mutex_enter(DCD_MUTEX);
3971 3971
3972 3972 dkdevid = kmem_alloc(un->un_secsize, KM_SLEEP);
3973 3973
3974 3974 (void) bzero((caddr_t)&ucmd, sizeof (ucmd));
3975 3975 (void) bzero((caddr_t)&cdb, sizeof (cdb));
3976 3976
3977 3977 if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
3978 3978 cdb.cmd = ATA_READ_DMA;
3979 3979 } else {
3980 3980 if (un->un_dp->options & BLOCK_MODE)
3981 3981 cdb.cmd = ATA_READ_MULTIPLE;
3982 3982 else
3983 3983 cdb.cmd = ATA_READ;
3984 3984 }
3985 3985 cdb.size = un->un_secsize;
3986 3986 cdb.sector_num.lba_num = blk;
3987 3987 cdb.address_mode = ADD_LBA_MODE;
3988 3988 cdb.direction = DATA_READ;
3989 3989
3990 3990 ucmd.udcd_flags = UDCD_READ;
3991 3991 ucmd.udcd_cmd = &cdb;
3992 3992 ucmd.udcd_bufaddr = (caddr_t)dkdevid;
3993 3993 ucmd.udcd_buflen = un->un_secsize;
3994 3994 ucmd.udcd_flags |= UDCD_SILENT;
3995 3995 dev = makedevice(ddi_driver_major(DCD_DEVINFO),
3996 3996 ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
3997 3997 mutex_exit(DCD_MUTEX);
3998 3998 status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
3999 3999 mutex_enter(DCD_MUTEX);
4000 4000
4001 4001 if (status != 0) {
4002 4002 kmem_free((caddr_t)dkdevid, un->un_secsize);
4003 4003 return (status);
4004 4004 }
4005 4005
4006 4006 /* Validate the revision */
4007 4007
4008 4008 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
4009 4009 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
4010 4010 kmem_free((caddr_t)dkdevid, un->un_secsize);
4011 4011 return (EINVAL);
4012 4012 }
4013 4013
4014 4014 /* Calculate the checksum */
4015 4015 chksum = 0;
4016 4016 ip = (uint_t *)dkdevid;
4017 4017 for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++)
4018 4018 chksum ^= ip[i];
4019 4019
4020 4020 /* Compare the checksums */
4021 4021
4022 4022 if (DKD_GETCHKSUM(dkdevid) != chksum) {
4023 4023 kmem_free((caddr_t)dkdevid, un->un_secsize);
4024 4024 return (EINVAL);
4025 4025 }
4026 4026
4027 4027 /* VAlidate the device id */
4028 4028 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
4029 4029 kmem_free((caddr_t)dkdevid, un->un_secsize);
4030 4030 return (EINVAL);
4031 4031 }
4032 4032
4033 4033 /* return a copy of the device id */
4034 4034 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
4035 4035 un->un_devid = (ddi_devid_t)kmem_alloc(sz, KM_SLEEP);
4036 4036 bcopy(&dkdevid->dkd_devid, un->un_devid, sz);
4037 4037 kmem_free((caddr_t)dkdevid, un->un_secsize);
4038 4038
4039 4039 return (0);
4040 4040 }
4041 4041
4042 4042 /*
4043 4043 * Return the device id for the device.
4044 4044 * 1. If the device ID exists then just return it - nothing to do in that case.
4045 4045 * 2. Build one from the drives model number and serial number.
4046 4046 * 3. If there is a problem in building it from serial/model #, then try
4047 4047 * to read it from the acyl region of the disk.
4048 4048 * Note: If this function is unable to return a valid ID then the calling
 * point will invoke the routine to create a fabricated ID and store it on the
4050 4050 * acyl region of the disk.
4051 4051 */
4052 4052 static ddi_devid_t
4053 4053 dcd_get_devid(struct dcd_disk *un)
4054 4054 {
4055 4055 int rc;
4056 4056
4057 4057 /* If already registered, return that value */
4058 4058 if (un->un_devid != NULL)
4059 4059 return (un->un_devid);
4060 4060
4061 4061 /* Build a devid from model and serial number, if present */
4062 4062 rc = dcd_make_devid_from_serial(un);
4063 4063
4064 4064 if (rc != DDI_SUCCESS) {
4065 4065 /* Read the devid from the disk. */
4066 4066 if (dcd_read_deviceid(un))
4067 4067 return (NULL);
4068 4068 }
4069 4069
4070 4070 (void) ddi_devid_register(DCD_DEVINFO, un->un_devid);
4071 4071 return (un->un_devid);
4072 4072 }
4073 4073
4074 4074
4075 4075 static ddi_devid_t
4076 4076 dcd_create_devid(struct dcd_disk *un)
4077 4077 {
4078 4078 if (ddi_devid_init(DCD_DEVINFO, DEVID_FAB, 0, NULL, (ddi_devid_t *)
4079 4079 &un->un_devid) == DDI_FAILURE)
4080 4080 return (NULL);
4081 4081
4082 4082 if (dcd_write_deviceid(un)) {
4083 4083 ddi_devid_free(un->un_devid);
4084 4084 un->un_devid = NULL;
4085 4085 return (NULL);
4086 4086 }
4087 4087
4088 4088 (void) ddi_devid_register(DCD_DEVINFO, un->un_devid);
4089 4089 return (un->un_devid);
4090 4090 }
4091 4091
4092 4092 /*
4093 4093 * Build a devid from the model and serial number, if present
4094 4094 * Return DDI_SUCCESS or DDI_FAILURE.
4095 4095 */
4096 4096 static int
4097 4097 dcd_make_devid_from_serial(struct dcd_disk *un)
4098 4098 {
4099 4099 int rc = DDI_SUCCESS;
4100 4100 char *hwid;
4101 4101 char *model;
4102 4102 int model_len;
4103 4103 char *serno;
4104 4104 int serno_len;
4105 4105 int total_len;
4106 4106
4107 4107 /* initialize the model and serial number information */
4108 4108 model = un->un_dcd->dcd_ident->dcd_model;
4109 4109 model_len = DCD_MODEL_NUMBER_LENGTH;
4110 4110 serno = un->un_dcd->dcd_ident->dcd_drvser;
4111 4111 serno_len = DCD_SERIAL_NUMBER_LENGTH;
4112 4112
4113 4113 /* Verify the model and serial number */
4114 4114 dcd_validate_model_serial(model, &model_len, model_len);
4115 4115 if (model_len == 0) {
4116 4116 rc = DDI_FAILURE;
4117 4117 goto out;
4118 4118 }
4119 4119 dcd_validate_model_serial(serno, &serno_len, serno_len);
4120 4120 if (serno_len == 0) {
4121 4121 rc = DDI_FAILURE;
4122 4122 goto out;
4123 4123 }
4124 4124
4125 4125 /*
4126 4126 * The device ID will be concatenation of the model number,
4127 4127 * the '=' separator, the serial number. Allocate
4128 4128 * the string and concatenate the components.
4129 4129 */
4130 4130 total_len = model_len + 1 + serno_len;
4131 4131 hwid = kmem_alloc(total_len, KM_SLEEP);
4132 4132 bcopy((caddr_t)model, (caddr_t)hwid, model_len);
4133 4133 bcopy((caddr_t)"=", (caddr_t)&hwid[model_len], 1);
4134 4134 bcopy((caddr_t)serno, (caddr_t)&hwid[model_len + 1], serno_len);
4135 4135
4136 4136 /* Initialize the device ID, trailing NULL not included */
4137 4137 rc = ddi_devid_init(DCD_DEVINFO, DEVID_ATA_SERIAL, total_len,
4138 4138 hwid, (ddi_devid_t *)&un->un_devid);
4139 4139
4140 4140 /* Free the allocated string */
4141 4141 kmem_free(hwid, total_len);
4142 4142
4143 4143 out: return (rc);
4144 4144 }
4145 4145
/*
 * Test for a valid model or serial number.  A valid string contains at
 * least one character that is not a space, a '0' digit, or a NUL.  On
 * return *retlen holds totallen minus the run of trailing blanks and
 * NULs, or zero when the whole string is invalid.
 */
static void
dcd_validate_model_serial(char *str, int *retlen, int totallen)
{
	int idx;
	int trailing = 0;	/* run of ' '/NUL ending at the string */
	int seen_valid = 0;	/* saw at least one significant char */

	for (idx = 0; idx < totallen; idx++) {
		char c = str[idx];

		if (c != ' ' && c != '\0' && c != '0')
			seen_valid = 1;
		if (c == ' ' || c == '\0')
			trailing++;
		else
			trailing = 0;
	}

	*retlen = seen_valid ? totallen - trailing : 0;
}
4176 4176
4177 4177 #ifndef lint
4178 4178 void
4179 4179 clean_print(dev_info_t *dev, char *label, uint_t level,
4180 4180 char *title, char *data, int len)
4181 4181 {
4182 4182 int i;
4183 4183 char buf[256];
4184 4184
4185 4185 (void) sprintf(buf, "%s:", title);
4186 4186 for (i = 0; i < len; i++) {
4187 4187 (void) sprintf(&buf[strlen(buf)], "0x%x ", (data[i] & 0xff));
4188 4188 }
4189 4189 (void) sprintf(&buf[strlen(buf)], "\n");
4190 4190
4191 4191 dcd_log(dev, label, level, "%s", buf);
4192 4192 }
4193 4193 #endif /* Not lint */
4194 4194
#ifndef lint
/*
 * Copy inquiry-style identity data into s, masking non-printable
 * characters with '*' and stopping at the first space found after the
 * first character.  The output is always NUL terminated; s must have
 * room for l + 1 bytes.
 */
void
inq_fill(char *p, int l, char *s)
{
	unsigned idx;
	char c;

	for (idx = 0; idx < (unsigned)l; idx++) {
		c = p[idx];
		if (c < ' ' || c >= 0177) {
			/* Mask non-printable characters. */
			c = '*';
		} else if (idx != 0 && c == ' ') {
			/* Stop at the first space past the start. */
			break;
		}
		s[idx] = c;
	}
	s[idx] = 0;
}
#endif /* Not lint */
4219 4219
4220 4220 char *
4221 4221 dcd_sname(uchar_t status)
4222 4222 {
4223 4223 switch (status & STATUS_ATA_MASK) {
4224 4224 case STATUS_GOOD:
4225 4225 return ("good status");
4226 4226
4227 4227 case STATUS_ATA_BUSY:
4228 4228 return ("busy");
4229 4229
4230 4230 default:
4231 4231 return ("<unknown status>");
4232 4232 }
4233 4233 }
4234 4234
4235 4235 /* ARGSUSED0 */
4236 4236 char *
4237 4237 dcd_rname(int reason)
4238 4238 {
4239 4239 static char *rnames[] = {
4240 4240 "cmplt",
4241 4241 "incomplete",
4242 4242 "dma_derr",
4243 4243 "tran_err",
4244 4244 "reset",
4245 4245 "aborted",
4246 4246 "timeout",
4247 4247 "data_ovr",
4248 4248 };
4249 4249 if (reason > CMD_DATA_OVR) {
4250 4250 return ("<unknown reason>");
4251 4251 } else {
4252 4252 return (rnames[reason]);
4253 4253 }
4254 4254 }
4255 4255
4256 4256
4257 4257
/*
 * Check whether the device is write protected.  Direct-attached ATA
 * disks expose no write-protect indication, so always report writable.
 */
/* ARGSUSED0 */
int
dcd_check_wp(dev_t dev)
{
	return (0);
}
4265 4265
/*
 * Create device error kstats
 */
/*
 * Create and install the per-instance "daderror" named kstat that
 * exports soft/hard/transport error counters and drive identity data.
 * Idempotent: does nothing if the kstat already exists.  Always
 * returns 0, even when kstat_create() fails (error stats then simply
 * stay disabled for this unit).
 */
static int
dcd_create_errstats(struct dcd_disk *un, int instance)
{

	char kstatname[KSTAT_STRLEN];

	if (un->un_errstats == (kstat_t *)0) {
		(void) sprintf(kstatname, "dad%d,error", instance);
		un->un_errstats = kstat_create("daderror", instance, kstatname,
		    "device_error", KSTAT_TYPE_NAMED,
		    sizeof (struct dcd_errstats)/ sizeof (kstat_named_t),
		    KSTAT_FLAG_PERSISTENT);

		if (un->un_errstats) {
			struct dcd_errstats *dtp;

			/* Name and type each field of the named kstat. */
			dtp = (struct dcd_errstats *)un->un_errstats->ks_data;
			kstat_named_init(&dtp->dcd_softerrs, "Soft Errors",
			    KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_harderrs, "Hard Errors",
			    KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_transerrs,
			    "Transport Errors", KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_model, "Model",
			    KSTAT_DATA_CHAR);
			kstat_named_init(&dtp->dcd_revision, "Revision",
			    KSTAT_DATA_CHAR);
			kstat_named_init(&dtp->dcd_serial, "Serial No",
			    KSTAT_DATA_CHAR);
			kstat_named_init(&dtp->dcd_capacity, "Size",
			    KSTAT_DATA_ULONGLONG);
			kstat_named_init(&dtp->dcd_rq_media_err, "Media Error",
			    KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_rq_ntrdy_err,
			    "Device Not Ready", KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_rq_nodev_err, " No Device",
			    KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_rq_recov_err, "Recoverable",
			    KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_rq_illrq_err,
			    "Illegal Request", KSTAT_DATA_UINT32);

			un->un_errstats->ks_private = un;
			un->un_errstats->ks_update = nulldev;
			kstat_install(un->un_errstats);

			/*
			 * Snapshot the drive identity and capacity.
			 * NOTE(review): strncpy with the exact field width
			 * may leave these values without a NUL terminator;
			 * the kstat char fields appear to be treated as
			 * fixed-width (16/16/8 bytes) — confirm consumers.
			 */
			(void) strncpy(&dtp->dcd_model.value.c[0],
			    un->un_dcd->dcd_ident->dcd_model, 16);
			(void) strncpy(&dtp->dcd_serial.value.c[0],
			    un->un_dcd->dcd_ident->dcd_drvser, 16);
			(void) strncpy(&dtp->dcd_revision.value.c[0],
			    un->un_dcd->dcd_ident->dcd_fw, 8);
			dtp->dcd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_diskcapacity *
			    (uint64_t)un->un_lbasize);
		}
	}
	return (0);
}
4328 4328
4329 4329
/*
 * This has been moved from DADA layer as this does not do anything other than
 * retrying the command when it is busy or it does not complete
 */
/*
 * Synchronously transport a packet, polling for completion.
 *
 * The packet is temporarily switched to FLAG_NOINTR with no completion
 * callback, handed to dcd_transport() up to dcd_poll_busycnt times,
 * and then restored to the caller's original flags/callback/timeout.
 * Returns 0 when the command completed (CMD_CMPLT); otherwise the last
 * dcd_transport() return value, or -1 if transport was never attempted.
 */
int
dcd_poll(struct dcd_pkt *pkt)
{
	int busy_count, rval = -1, savef;
	clock_t savet;
	void (*savec)();


	/*
	 * Save old flags
	 */
	savef = pkt->pkt_flags;
	savec = pkt->pkt_comp;
	savet = pkt->pkt_time;

	/* Force polled (no-interrupt) operation for the duration. */
	pkt->pkt_flags |= FLAG_NOINTR;


	/*
	 * Set the Pkt_comp to NULL
	 */

	pkt->pkt_comp = 0;

	/*
	 * Set the Pkt time for the polled command
	 */
	if (pkt->pkt_time == 0) {
		pkt->pkt_time = DCD_POLL_TIMEOUT;
	}


	/* Now transport the command */
	for (busy_count = 0; busy_count < dcd_poll_busycnt; busy_count++) {
		if ((rval = dcd_transport(pkt)) == TRAN_ACCEPT) {
			if (pkt->pkt_reason == CMD_INCOMPLETE &&
			    pkt->pkt_state == 0) {
				/*
				 * Accepted but not responding yet; wait
				 * 100 clock ticks before retrying (NOTE:
				 * tick duration depends on hz).
				 */
				delay(100);
			} else if (pkt->pkt_reason == CMD_CMPLT) {
				/* Command completed; report success. */
				rval = 0;
				break;
			}
		}
		if (rval == TRAN_BUSY) {
			/* HBA busy; back off, then try again. */
			delay(100);
			continue;
		}
	}

	/* Restore the caller's packet settings. */
	pkt->pkt_flags = savef;
	pkt->pkt_comp = savec;
	pkt->pkt_time = savet;
	return (rval);
}
4388 4388
4389 4389
4390 4390 void
4391 4391 dcd_translate(struct dadkio_status32 *statp, struct udcd_cmd *cmdp)
4392 4392 {
4393 4393 if (cmdp->udcd_status_reg & STATUS_ATA_BUSY)
4394 4394 statp->status = DADKIO_STAT_NOT_READY;
4395 4395 else if (cmdp->udcd_status_reg & STATUS_ATA_DWF)
4396 4396 statp->status = DADKIO_STAT_HARDWARE_ERROR;
4397 4397 else if (cmdp->udcd_status_reg & STATUS_ATA_CORR)
4398 4398 statp->status = DADKIO_STAT_SOFT_ERROR;
4399 4399 else if (cmdp->udcd_status_reg & STATUS_ATA_ERR) {
4400 4400 /*
4401 4401 * The error register is valid only when BSY and DRQ not set
4402 4402 * Assumed that HBA has checked this before it gives the data
4403 4403 */
4404 4404 if (cmdp->udcd_error_reg & ERR_AMNF)
4405 4405 statp->status = DADKIO_STAT_NOT_FORMATTED;
4406 4406 else if (cmdp->udcd_error_reg & ERR_TKONF)
4407 4407 statp->status = DADKIO_STAT_NOT_FORMATTED;
4408 4408 else if (cmdp->udcd_error_reg & ERR_ABORT)
4409 4409 statp->status = DADKIO_STAT_ILLEGAL_REQUEST;
4410 4410 else if (cmdp->udcd_error_reg & ERR_IDNF)
4411 4411 statp->status = DADKIO_STAT_NOT_FORMATTED;
4412 4412 else if (cmdp->udcd_error_reg & ERR_UNC)
4413 4413 statp->status = DADKIO_STAT_BUS_ERROR;
4414 4414 else if (cmdp->udcd_error_reg & ERR_BBK)
4415 4415 statp->status = DADKIO_STAT_MEDIUM_ERROR;
4416 4416 } else
4417 4417 statp->status = DADKIO_STAT_NO_ERROR;
4418 4418 }
4419 4419
/*
 * Issue an ATA FLUSH CACHE command in polled mode, retrying up to
 * three times, to push the drive's write cache to the media.  Failures
 * are silently ignored: FLUSH CACHE is optional per ATA-4.
 */
static void
dcd_flush_cache(struct dcd_disk *un)
{
	struct dcd_pkt *pkt;
	int retry_count;


	if ((pkt = dcd_init_pkt(ROUTE, NULL, NULL,
	    (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
	    PKT_CONSISTENT, NULL_FUNC, NULL)) == NULL) {
		/* No packet resources; nothing more we can do. */
		return;
	}

	/* FLUSH CACHE transfers no data. */
	makecommand(pkt, 0, ATA_FLUSH_CACHE, 0, ADD_LBA_MODE, 0,
	    NO_DATA_XFER, 0);

	/*
	 * Send the command. There are chances it might fail on some
	 * disks since it is not a mandatory command as per ata-4. Try
	 * 3 times if it fails. The retry count has been randomly selected.
	 * There is a need for retry since as per the spec FLUSH CACHE can fail
	 * as a result of unrecoverable error encountered during execution
	 * of writing data and subsequent command should continue flushing
	 * cache.
	 */
	for (retry_count = 0; retry_count < 3; retry_count++) {
		/*
		 * Set the packet fields.
		 */
		pkt->pkt_comp = 0;
		pkt->pkt_time = DCD_POLL_TIMEOUT;
		pkt->pkt_flags |= FLAG_FORCENOINTR;
		pkt->pkt_flags |= FLAG_NOINTR;
		if (dcd_transport(pkt) == TRAN_ACCEPT) {
			if (pkt->pkt_reason == CMD_CMPLT) {
				break;
			}
		}
		/*
		 * Busy-wait about one second before retrying
		 * (drv_usecwait takes microseconds).  NOTE(review): an
		 * earlier comment claimed this matched a 100ms wait in
		 * dcd_poll, but dcd_poll uses delay(100) — 100 clock
		 * ticks, ~1s at hz=100 — so 1s appears intended;
		 * confirm before changing.
		 */
		drv_usecwait(1000000);
	}
	(void) dcd_destroy_pkt(pkt);
}
4466 4466
4467 4467 static int
4468 4468 dcd_send_lb_rw_cmd(dev_info_t *devi, void *bufaddr,
4469 4469 diskaddr_t start_block, size_t reqlength, uchar_t cmd)
4470 4470 {
4471 4471 struct dcd_pkt *pkt;
4472 4472 struct buf *bp;
4473 4473 diskaddr_t real_addr = start_block;
4474 4474 size_t buffer_size = reqlength;
4475 4475 uchar_t command, tmp;
4476 4476 int i, rval = 0;
4477 4477 struct dcd_disk *un;
4478 4478
4479 4479 un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi));
4480 4480 if (un == NULL)
4481 4481 return (ENXIO);
4482 4482
4483 4483 bp = dcd_alloc_consistent_buf(ROUTE, (struct buf *)NULL,
4484 4484 buffer_size, B_READ, NULL_FUNC, NULL);
4485 4485 if (!bp) {
4486 4486 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
4487 4487 "no bp for disk label\n");
4488 4488 return (ENOMEM);
4489 4489 }
4490 4490
4491 4491 pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
4492 4492 bp, (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
4493 4493 PKT_CONSISTENT, NULL_FUNC, NULL);
4494 4494
4495 4495 if (!pkt) {
4496 4496 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
4497 4497 "no memory for disk label\n");
4498 4498 dcd_free_consistent_buf(bp);
4499 4499 return (ENOMEM);
4500 4500 }
4501 4501
4502 4502 if (cmd == TG_READ) {
4503 4503 bzero(bp->b_un.b_addr, buffer_size);
4504 4504 tmp = DATA_READ;
4505 4505 } else {
4506 4506 bcopy((caddr_t)bufaddr, bp->b_un.b_addr, buffer_size);
4507 4507 tmp = DATA_WRITE;
4508 4508 }
4509 4509
4510 4510 mutex_enter(DCD_MUTEX);
4511 4511 if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
4512 4512 if (cmd == TG_READ) {
4513 4513 command = ATA_READ_DMA;
4514 4514 } else {
4515 4515 command = ATA_WRITE_DMA;
4516 4516 }
4517 4517 } else {
4518 4518 if (cmd == TG_READ) {
4519 4519 if (un->un_dp->options & BLOCK_MODE)
4520 4520 command = ATA_READ_MULTIPLE;
4521 4521 else
4522 4522 command = ATA_READ;
4523 4523 } else {
4524 4524 if (un->un_dp->options & BLOCK_MODE)
4525 4525 command = ATA_READ_MULTIPLE;
4526 4526 else
4527 4527 command = ATA_WRITE;
4528 4528 }
4529 4529 }
4530 4530 mutex_exit(DCD_MUTEX);
4531 4531 (void) makecommand(pkt, 0, command, real_addr, ADD_LBA_MODE,
4532 4532 buffer_size, tmp, 0);
4533 4533
4534 4534 for (i = 0; i < 3; i++) {
4535 4535 if (dcd_poll(pkt) || SCBP_C(pkt) != STATUS_GOOD ||
4536 4536 (pkt->pkt_state & STATE_XFERRED_DATA) == 0 ||
4537 4537 (pkt->pkt_resid != 0)) {
4538 4538 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
4539 4539 "Status %x, state %x, resid %lx\n",
4540 4540 SCBP_C(pkt), pkt->pkt_state, pkt->pkt_resid);
4541 4541 rval = EIO;
4542 4542 } else {
4543 4543 break;
4544 4544 }
4545 4545 }
4546 4546
4547 4547 if (rval != 0) {
4548 4548 dcd_destroy_pkt(pkt);
4549 4549 dcd_free_consistent_buf(bp);
4550 4550 return (EIO);
4551 4551 }
4552 4552
4553 4553 if (cmd == TG_READ) {
4554 4554 bcopy(bp->b_un.b_addr, bufaddr, reqlength);
4555 4555 rval = 0;
4556 4556 }
4557 4557
4558 4558 dcd_destroy_pkt(pkt);
4559 4559 dcd_free_consistent_buf(bp);
4560 4560 return (rval);
4561 4561 }
4562 4562
4563 4563 static int dcd_compute_dk_capacity(struct dcd_device *devp,
4564 4564 diskaddr_t *capacity)
4565 4565 {
4566 4566 diskaddr_t cap;
4567 4567 diskaddr_t no_of_lbasec;
4568 4568
4569 4569 cap = devp->dcd_ident->dcd_fixcyls *
4570 4570 devp->dcd_ident->dcd_heads *
4571 4571 devp->dcd_ident->dcd_sectors;
4572 4572 no_of_lbasec = devp->dcd_ident->dcd_addrsec[1];
4573 4573 no_of_lbasec = no_of_lbasec << 16;
4574 4574 no_of_lbasec = no_of_lbasec | devp->dcd_ident->dcd_addrsec[0];
4575 4575
4576 4576 if (no_of_lbasec > cap) {
4577 4577 cap = no_of_lbasec;
4578 4578 }
4579 4579
4580 4580 if (cap != ((uint32_t)-1))
4581 4581 *capacity = cap;
4582 4582 else
4583 4583 return (EINVAL);
4584 4584 return (0);
4585 4585 }
4586 4586
4587 4587 /*ARGSUSED5*/
4588 4588 static int
4589 4589 dcd_lb_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
4590 4590 diskaddr_t start_block, size_t reqlength, void *tg_cookie)
4591 4591 {
4592 4592 if (cmd != TG_READ && cmd != TG_WRITE)
4593 4593 return (EINVAL);
4594 4594
4595 4595 return (dcd_send_lb_rw_cmd(devi, bufaddr, start_block,
4596 4596 reqlength, cmd));
4597 4597 }
4598 4598
/*
 * cmlb TG_GETPHYGEOM callback: derive the drive's physical geometry
 * from its ATA IDENTIFY data.  Returns 0 on success, ENOTSUP for
 * removable-media ATA devices, and EINVAL for ATAPI devices.
 */
static int
dcd_lb_getphygeom(dev_info_t *devi, cmlb_geom_t *phygeomp)
{
	struct dcd_device *devp;
	uint32_t no_of_lbasec, capacity, calculated_cylinders;

	devp = ddi_get_driver_private(devi);

	if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
		if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
			/*
			 * Start from the CHS values in IDENTIFY,
			 * reserving two alternate cylinders.
			 */
			phygeomp->g_ncyl = devp->dcd_ident->dcd_fixcyls - 2;
			phygeomp->g_acyl = 2;
			phygeomp->g_nhead = devp->dcd_ident->dcd_heads;
			phygeomp->g_nsect = devp->dcd_ident->dcd_sectors;

			/* LBA sector count: two 16-bit words, low first. */
			no_of_lbasec = devp->dcd_ident->dcd_addrsec[1];
			no_of_lbasec = no_of_lbasec << 16;
			no_of_lbasec = no_of_lbasec |
			    devp->dcd_ident->dcd_addrsec[0];
			capacity = devp->dcd_ident->dcd_fixcyls *
			    devp->dcd_ident->dcd_heads *
			    devp->dcd_ident->dcd_sectors;
			/*
			 * When the LBA count exceeds the CHS product the
			 * drive is LBA-sized; recompute the geometry so
			 * ncyl * nhead * nsect approximates the capacity.
			 */
			if (no_of_lbasec > capacity) {
				capacity = no_of_lbasec;
				if (capacity > NUM_SECTORS_32G) {
					/*
					 * if the capacity is greater than 32G,
					 * then 255 is the sectors per track.
					 * This should be good until 128G disk
					 * capacity, which is the current ATA-4
					 * limitation.
					 */
					phygeomp->g_nsect = 255;
				}

				/*
				 * If the disk capacity is >= 128GB then no. of
				 * addressable sectors will be set to 0xfffffff
				 * in the IDENTIFY info. In that case set the
				 * no. of pcyl to the Max. 16bit value.
				 */

				calculated_cylinders = (capacity) /
				    (phygeomp->g_nhead * phygeomp->g_nsect);
				if (calculated_cylinders >= USHRT_MAX) {
					phygeomp->g_ncyl = USHRT_MAX - 2;
				} else {
					phygeomp->g_ncyl =
					    calculated_cylinders - 2;
				}
			}

			phygeomp->g_capacity = capacity;
			phygeomp->g_intrlv = 0;
			/*
			 * Rotational speed is not in IDENTIFY data;
			 * 5400 rpm is assumed.
			 */
			phygeomp->g_rpm = 5400;
			phygeomp->g_secsize = devp->dcd_ident->dcd_secsiz;

			return (0);
		} else
			return (ENOTSUP);
	} else {
		return (EINVAL);
	}
}
4663 4663
4664 4664
/*
 * cmlb TG_GETINFO callback: answer geometry, capacity, block-size and
 * attribute queries from the common label module.  Returns 0 on
 * success, ENXIO for a missing unit, EIO for an invalid capacity, -1
 * for the unsupported TG_GETVIRTGEOM, and ENOTTY for unknown commands.
 */
/*ARGSUSED3*/
static int
dcd_lb_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct dcd_disk *un;

	un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi));

	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
		return (dcd_lb_getphygeom(devi, (cmlb_geom_t *)arg));

	case TG_GETVIRTGEOM:
		/* No virtual geometry for direct-attached disks. */
		return (-1);

	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		/* Capacity is read under the unit mutex. */
		mutex_enter(DCD_MUTEX);
		if (un->un_diskcapacity <= 0) {
			mutex_exit(DCD_MUTEX);
			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
			    "invalid disk capacity\n");
			return (EIO);
		}
		if (cmd == TG_GETCAPACITY)
			*(diskaddr_t *)arg = un->un_diskcapacity;
		else
			*(uint32_t *)arg = DEV_BSIZE;

		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "capacity %x\n",
		    un->un_diskcapacity);
		mutex_exit(DCD_MUTEX);
		return (0);

	case TG_GETATTR:
		/* Copy out the cached target attributes under the mutex. */
		mutex_enter(DCD_MUTEX);
		*(tg_attribute_t *)arg = un->un_tgattribute;
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "media_is_writable %x\n",
		    un->un_tgattribute.media_is_writable);
		mutex_exit(DCD_MUTEX);
		return (0);
	default:
		return (ENOTTY);
	}
}
↓ open down ↓ |
3997 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX