1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
  24  */
  25 
  26 #include <sys/scsi/scsi.h>
  27 #include <sys/dktp/cm.h>
  28 #include <sys/dktp/quetypes.h>
  29 #include <sys/dktp/queue.h>
  30 #include <sys/dktp/fctypes.h>
  31 #include <sys/dktp/flowctrl.h>
  32 #include <sys/dktp/cmdev.h>
  33 #include <sys/dkio.h>
  34 #include <sys/dktp/tgdk.h>
  35 #include <sys/dktp/dadk.h>
  36 #include <sys/dktp/bbh.h>
  37 #include <sys/dktp/altsctr.h>
  38 #include <sys/dktp/cmdk.h>
  39 
  40 #include <sys/stat.h>
  41 #include <sys/vtoc.h>
  42 #include <sys/file.h>
  43 #include <sys/dktp/dadkio.h>
  44 #include <sys/aio_req.h>
  45 
  46 #include <sys/cmlb.h>
  47 
  48 /*
  49  * Local Static Data
  50  */
  51 #ifdef CMDK_DEBUG
  52 #define DENT    0x0001
  53 #define DIO     0x0002
  54 
  55 static  int     cmdk_debug = DIO;
  56 #endif
  57 
  58 #ifndef TRUE
  59 #define TRUE    1
  60 #endif
  61 
  62 #ifndef FALSE
  63 #define FALSE   0
  64 #endif
  65 
  66 /*
  67  * NDKMAP is the base number for accessing the fdisk partitions.
  68  * c?d?p0 --> cmdk@?,?:q
  69  */
  70 #define PARTITION0_INDEX        (NDKMAP + 0)
  71 
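     /*
      * Convenience macros for reaching the dadk target object linked to
      * this instance: DKTP_DATA is the target's private data (passed to
      * the dadk_* calls below) and DKTP_EXT is its extended info (node
      * type, controller type, removable/read-only flags, etc.).
      */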
  72 #define DKTP_DATA               (dkp->dk_tgobjp)->tg_data
  73 #define DKTP_EXT                (dkp->dk_tgobjp)->tg_ext
  74 
  75 void *cmdk_state;
  76 
  77 /*
  78  * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
  79  * attach situations
  80  */
  81 static kmutex_t cmdk_attach_mutex;
  82 static int cmdk_max_instance = 0;
  83 
  84 /*
  85  * Panic dumpsys state
  86  * This is a single flag that is not mutex protected, since the
  87  * system prevents thread switching during a panic dump and
  88  * cmdk_dump will only be called single-threaded.
  89  */
  90 static int      cmdk_indump;
  91 
  92 /*
  93  * Local Function Prototypes
  94  */
  95 static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
  96 static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
  97 static void cmdkmin(struct buf *bp);
  98 static int cmdkrw(dev_t dev, struct uio *uio, int flag);
  99 static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);
 100 
 101 /*
 102  * Bad Block Handling Functions Prototypes
 103  */
 104 static void cmdk_bbh_reopen(struct cmdk *dkp);
 105 static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
 106 static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
 107 static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
 108 static void cmdk_bbh_close(struct cmdk *dkp);
 109 static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
 110 static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);
 111 
 112 static struct bbh_objops cmdk_bbh_ops = {
 113         nulldev,
 114         nulldev,
 115         cmdk_bbh_gethandle,
 116         cmdk_bbh_htoc,
 117         cmdk_bbh_freehandle,
 118         0, 0
 119 };
 120 
 121 static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
 122 static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
 123 static int cmdkstrategy(struct buf *bp);
 124 static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
 125 static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
 126 static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
 127 static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
 128 static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
 129     int mod_flags, char *name, caddr_t valuep, int *lengthp);
 130 static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
 131 static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);
 132 
 133 /*
 134  * Device driver ops vector
 135  */
 136 
 137 static struct cb_ops cmdk_cb_ops = {
 138         cmdkopen,               /* open */
 139         cmdkclose,              /* close */
 140         cmdkstrategy,           /* strategy */
 141         nodev,                  /* print */
 142         cmdkdump,               /* dump */
 143         cmdkread,               /* read */
 144         cmdkwrite,              /* write */
 145         cmdkioctl,              /* ioctl */
 146         nodev,                  /* devmap */
 147         nodev,                  /* mmap */
 148         nodev,                  /* segmap */
 149         nochpoll,               /* poll */
 150         cmdk_prop_op,           /* cb_prop_op */
 151         0,                      /* streamtab  */
 152         D_64BIT | D_MP | D_NEW, /* Driver compatibility flag */
 153         CB_REV,                 /* cb_rev */
 154         cmdkaread,              /* async read */
 155         cmdkawrite              /* async write */
 156 };
 157 
 158 static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
 159     void **result);
 160 static int cmdkprobe(dev_info_t *dip);
 161 static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
 162 static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);
 163 
 164 static void cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp);
 165 static int cmdkresume(dev_info_t *dip);
 166 static int cmdksuspend(dev_info_t *dip);
 167 static int cmdkpower(dev_info_t *dip, int component, int level);
 168 
 169 struct dev_ops cmdk_ops = {
 170         DEVO_REV,               /* devo_rev, */
 171         0,                      /* refcnt  */
 172         cmdkinfo,               /* info */
 173         nulldev,                /* identify */
 174         cmdkprobe,              /* probe */
 175         cmdkattach,             /* attach */
 176         cmdkdetach,             /* detach */
 177         nodev,                  /* reset */
 178         &cmdk_cb_ops,               /* driver operations */
 179         (struct bus_ops *)0,    /* bus operations */
 180         cmdkpower,              /* power */
 181         ddi_quiesce_not_needed, /* quiesce */
 182 };
 183 
 184 /*
 185  * This is the loadable module wrapper.
 186  */
 187 #include <sys/modctl.h>
 188 
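     /*
      * When built with XPV_HVM_DRIVER (the Solaris xVM HVM environment),
      * cmdk registers only as a misc module rather than as a driver,
      * presumably so that the paravirtualized disk driver can service
      * the disks instead.
      */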
 189 #ifndef XPV_HVM_DRIVER
 190 static struct modldrv modldrv = {
 191         &mod_driverops,             /* Type of module. This one is a driver */
 192         "Common Direct Access Disk",
 193         &cmdk_ops,                          /* driver ops           */
 194 };
 195 
 196 static struct modlinkage modlinkage = {
 197         MODREV_1, (void *)&modldrv, NULL
 198 };
 199 
 200 
 201 #else /* XPV_HVM_DRIVER */
 202 static struct modlmisc modlmisc = {
 203         &mod_miscops,               /* Type of module. This one is a misc */
 204         "HVM Common Direct Access Disk",
 205 };
 206 
 207 static struct modlinkage modlinkage = {
 208         MODREV_1, (void *)&modlmisc, NULL
 209 };
 210 
 211 #endif /* XPV_HVM_DRIVER */
 212 
 213 /* Function prototypes for cmlb callbacks */
 214 
 215 static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
 216     diskaddr_t start, size_t length, void *tg_cookie);
 217 
 218 static int cmdk_lb_getinfo(dev_info_t *dip, int cmd,  void *arg,
 219     void *tg_cookie);
 220 
 221 static void cmdk_devid_setup(struct cmdk *dkp);
 222 static int cmdk_devid_modser(struct cmdk *dkp);
 223 static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
 224 static int cmdk_devid_fabricate(struct cmdk *dkp);
 225 static int cmdk_devid_read(struct cmdk *dkp);
 226 
 227 static cmlb_tg_ops_t cmdk_lb_ops = {
 228         TG_DK_OPS_VERSION_1,
 229         cmdk_lb_rdwr,
 230         cmdk_lb_getinfo
 231 };
 232 
 233 static boolean_t
 234 cmdk_isopen(struct cmdk *dkp, dev_t dev)
 235 {
 236         int             part, otyp;
 237         ulong_t         partbit;
 238 
 239         ASSERT(MUTEX_HELD((&dkp->dk_mutex)));
 240 
 241         part = CMDKPART(dev);
 242         partbit = 1 << part;
 243 
 244         /* is this partition open via any layered or regular open? */
 245         if (dkp->dk_open_lyr[part] != 0)
 246                 return (B_TRUE);
 247         for (otyp = 0; otyp < OTYPCNT; otyp++)
 248                 if (dkp->dk_open_reg[otyp] & partbit)
 249                         return (B_TRUE);
 250         return (B_FALSE);
 251 }
 252 
 253 int
 254 _init(void)
 255 {
 256         int     rval;
 257 
 258 #ifndef XPV_HVM_DRIVER
 259         if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
 260                 return (rval);
 261 #endif /* !XPV_HVM_DRIVER */
 262 
 263         mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
 264         if ((rval = mod_install(&modlinkage)) != 0) {
 265                 mutex_destroy(&cmdk_attach_mutex);
 266 #ifndef XPV_HVM_DRIVER
 267                 ddi_soft_state_fini(&cmdk_state);
 268 #endif /* !XPV_HVM_DRIVER */
 269         }
 270         return (rval);
 271 }
 272 
 273 int
 274 _fini(void)
 275 {
 276         return (EBUSY);
 277 }
 278 
 279 int
 280 _info(struct modinfo *modinfop)
 281 {
 282         return (mod_info(&modlinkage, modinfop));
 283 }
 284 
 285 /*
 286  * Autoconfiguration Routines
 287  */
 288 static int
 289 cmdkprobe(dev_info_t *dip)
 290 {
 291         int     instance;
 292         int     status;
 293         struct  cmdk    *dkp;
 294 
 295         instance = ddi_get_instance(dip);
 296 
 297 #ifndef XPV_HVM_DRIVER
 298         if (ddi_get_soft_state(cmdk_state, instance))
 299                 return (DDI_PROBE_PARTIAL);
 300 
 301         if (ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS)
 302                 return (DDI_PROBE_PARTIAL);
 303 #endif /* !XPV_HVM_DRIVER */
 304 
 305         if ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL)
 306                 return (DDI_PROBE_PARTIAL);
 307 
 308         mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
 309         rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
 310         dkp->dk_dip = dip;
 311         mutex_enter(&dkp->dk_mutex);
 312 
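             /*
              * Encode the instance number into the minor-number space;
              * the low bits of the minor select the partition (see
              * CMDKPART()), so the instance is shifted by CMDK_UNITSHF.
              */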
 313         dkp->dk_dev = makedevice(ddi_driver_major(dip),
 314             ddi_get_instance(dip) << CMDK_UNITSHF);
 315 
 316         /* linkage to dadk and strategy */
 317         if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
 318                 mutex_exit(&dkp->dk_mutex);
 319                 mutex_destroy(&dkp->dk_mutex);
 320                 rw_destroy(&dkp->dk_bbh_mutex);
 321 #ifndef XPV_HVM_DRIVER
 322                 ddi_soft_state_free(cmdk_state, instance);
 323 #endif /* !XPV_HVM_DRIVER */
 324                 return (DDI_PROBE_PARTIAL);
 325         }
 326 
 327         status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
 328         if (status != DDI_PROBE_SUCCESS) {
 329                 cmdk_destroy_obj(dip, dkp);     /* dadk/strategy linkage  */
 330                 mutex_exit(&dkp->dk_mutex);
 331                 mutex_destroy(&dkp->dk_mutex);
 332                 rw_destroy(&dkp->dk_bbh_mutex);
 333 #ifndef XPV_HVM_DRIVER
 334                 ddi_soft_state_free(cmdk_state, instance);
 335 #endif /* !XPV_HVM_DRIVER */
 336                 return (status);
 337         }
 338 
 339         mutex_exit(&dkp->dk_mutex);
 340 #ifdef CMDK_DEBUG
 341         if (cmdk_debug & DENT)
 342                 PRF("cmdkprobe: instance= %d name= `%s`\n",
 343                     instance, ddi_get_name_addr(dip));
 344 #endif
 345         return (status);
 346 }
 347 
 348 static int
 349 cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 350 {
 351         int             instance;
 352         struct          cmdk *dkp;
 353         char            *node_type;
 354 
 355         switch (cmd) {
 356         case DDI_ATTACH:
 357                 break;
 358         case DDI_RESUME:
 359                 return (cmdkresume(dip));
 360         default:
 361                 return (DDI_FAILURE);
 362         }
 363 
 364         instance = ddi_get_instance(dip);
 365         if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
 366                 return (DDI_FAILURE);
 367 
 368         dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
 369         mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
 370 
 371         mutex_enter(&dkp->dk_mutex);
 372 
 373         /* dadk_attach is an empty function that only returns SUCCESS */
 374         (void) dadk_attach(DKTP_DATA);
 375 
 376         node_type = (DKTP_EXT->tg_nodetype);
 377 
 378         /*
 379          * This open allows cmlb to read the device and determine
 380          * the label type, so that cmlb can create minor nodes
 381          * for the device.
 382          */
 383 
 384         /* open the target disk  */
 385         if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS)
 386                 goto fail2;
 387 
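             /*
              * Under a 32-bit (_ILP32) kernel, a disk larger than
              * DK_MAX_BLOCKS cannot be fully addressed, so refuse to
              * attach such a disk.
              */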
 388 #ifdef _ILP32
 389         {
 390                 struct  tgdk_geom phyg;
 391                 (void) dadk_getphygeom(DKTP_DATA, &phyg);
 392                 if ((phyg.g_cap - 1) > DK_MAX_BLOCKS) {
 393                         (void) dadk_close(DKTP_DATA);
 394                         goto fail2;
 395                 }
 396         }
 397 #endif
 398 
 399 
 400         /* mark as having opened target */
 401         dkp->dk_flag |= CMDK_TGDK_OPEN;
 402 
 403         cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle);
 404 
 405         if (cmlb_attach(dip,
 406             &cmdk_lb_ops,
 407             DTYPE_DIRECT,               /* device_type */
 408             B_FALSE,                    /* removable */
 409             B_FALSE,                    /* hot pluggable XXX */
 410             node_type,
 411             CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT,  /* alter_behaviour */
 412             dkp->dk_cmlbhandle,
 413             0) != 0)
 414                 goto fail1;
 415 
 416         /* Calling validate will create minor nodes according to the disk label */
 417         (void) cmlb_validate(dkp->dk_cmlbhandle, 0, 0);
 418 
 419         /* set bbh (Bad Block Handling) */
 420         cmdk_bbh_reopen(dkp);
 421 
 422         /* setup devid string */
 423         cmdk_devid_setup(dkp);
 424 
 425         mutex_enter(&cmdk_attach_mutex);
 426         if (instance > cmdk_max_instance)
 427                 cmdk_max_instance = instance;
 428         mutex_exit(&cmdk_attach_mutex);
 429 
 430         mutex_exit(&dkp->dk_mutex);
 431 
 432         /*
 433          * Add a zero-length attribute to tell the world we support
 434          * kernel ioctls (for layered drivers)
 435          */
 436         (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
 437             DDI_KERNEL_IOCTL, NULL, 0);
 438         ddi_report_dev(dip);
 439 
 440         /*
 441          * Initialize power management
 442          */
 443         mutex_init(&dkp->dk_pm_mutex, NULL, MUTEX_DRIVER, NULL);
 444         cv_init(&dkp->dk_suspend_cv, NULL, CV_DRIVER, NULL);
 445         cmdk_setup_pm(dip, dkp);
 446 
 447         return (DDI_SUCCESS);
 448 
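     /*
      * Error exit paths: fail1 additionally releases the cmlb handle and
      * closes the target disk; both paths then tear down the dadk
      * linkage, the locks and the soft state allocated above.
      */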
 449 fail1:
 450         cmlb_free_handle(&dkp->dk_cmlbhandle);
 451         (void) dadk_close(DKTP_DATA);
 452 fail2:
 453         cmdk_destroy_obj(dip, dkp);
 454         rw_destroy(&dkp->dk_bbh_mutex);
 455         mutex_exit(&dkp->dk_mutex);
 456         mutex_destroy(&dkp->dk_mutex);
 457 #ifndef XPV_HVM_DRIVER
 458         ddi_soft_state_free(cmdk_state, instance);
 459 #endif /* !XPV_HVM_DRIVER */
 460         return (DDI_FAILURE);
 461 }
 462 
 463 
 464 static int
 465 cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
 466 {
 467         struct cmdk     *dkp;
 468         int             instance;
 469         int             max_instance;
 470 
 471         switch (cmd) {
 472         case DDI_DETACH:
 473                 /* return (DDI_FAILURE); */
 474                 break;
 475         case DDI_SUSPEND:
 476                 return (cmdksuspend(dip));
 477         default:
 478 #ifdef CMDK_DEBUG
 479                 if (cmdk_debug & DIO) {
 480                         PRF("cmdkdetach: cmd = %d unknown\n", cmd);
 481                 }
 482 #endif
 483                 return (DDI_FAILURE);
 484         }
 485 
 486         mutex_enter(&cmdk_attach_mutex);
 487         max_instance = cmdk_max_instance;
 488         mutex_exit(&cmdk_attach_mutex);
 489 
 490         /* check if any instance of driver is open */
 491         for (instance = 0; instance < max_instance; instance++) {
 492                 dkp = ddi_get_soft_state(cmdk_state, instance);
 493                 if (!dkp)
 494                         continue;
 495                 if (dkp->dk_flag & CMDK_OPEN)
 496                         return (DDI_FAILURE);
 497         }
 498 
 499         instance = ddi_get_instance(dip);
 500         if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
 501                 return (DDI_SUCCESS);
 502 
 503         mutex_enter(&dkp->dk_mutex);
 504 
 505         /*
 506          * The cmdk_part_info call at the end of cmdkattach may have
 507          * caused cmdk_reopen to do a TGDK_OPEN; make sure we close the
 508          * target on detach for the case where cmdkopen/cmdkclose never occurs.
 509          */
 510         if (dkp->dk_flag & CMDK_TGDK_OPEN) {
 511                 dkp->dk_flag &= ~CMDK_TGDK_OPEN;
 512                 (void) dadk_close(DKTP_DATA);
 513         }
 514 
 515         cmlb_detach(dkp->dk_cmlbhandle, 0);
 516         cmlb_free_handle(&dkp->dk_cmlbhandle);
 517         ddi_prop_remove_all(dip);
 518 
 519         cmdk_destroy_obj(dip, dkp);     /* dadk/strategy linkage  */
 520 
 521         /*
 522          * free the devid structure if allocated before
 523          */
 524         if (dkp->dk_devid) {
 525                 ddi_devid_free(dkp->dk_devid);
 526                 dkp->dk_devid = NULL;
 527         }
 528 
 529         mutex_exit(&dkp->dk_mutex);
 530         mutex_destroy(&dkp->dk_mutex);
 531         rw_destroy(&dkp->dk_bbh_mutex);
 532         mutex_destroy(&dkp->dk_pm_mutex);
 533         cv_destroy(&dkp->dk_suspend_cv);
 534 #ifndef XPV_HVM_DRIVER
 535         ddi_soft_state_free(cmdk_state, instance);
 536 #endif /* !XPV_HVM_DRIVER */
 537 
 538         return (DDI_SUCCESS);
 539 }
 540 
 541 static int
 542 cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
 543 {
 544         dev_t           dev = (dev_t)arg;
 545         int             instance;
 546         struct  cmdk    *dkp;
 547 
 548 #ifdef lint
 549         dip = dip;      /* no one ever uses this */
 550 #endif
 551 #ifdef CMDK_DEBUG
 552         if (cmdk_debug & DENT)
 553                 PRF("cmdkinfo: call\n");
 554 #endif
 555         instance = CMDKUNIT(dev);
 556 
 557         switch (infocmd) {
 558                 case DDI_INFO_DEVT2DEVINFO:
 559                         if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
 560                                 return (DDI_FAILURE);
 561                         *result = (void *) dkp->dk_dip;
 562                         break;
 563                 case DDI_INFO_DEVT2INSTANCE:
 564                         *result = (void *)(intptr_t)instance;
 565                         break;
 566                 default:
 567                         return (DDI_FAILURE);
 568         }
 569         return (DDI_SUCCESS);
 570 }
 571 
 572 /*
 573  * Initialize the power management components
 574  */
 575 static void
 576 cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp)
 577 {
 578         char *pm_comp[] = { "NAME=cmdk", "0=off", "1=on", NULL };
 579 
 580         /*
 581          * Since the cmdk device does not have the 'reg' property,
 582          * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
 583          * The following code is to tell cpr that this device
 584          * DOES need to be suspended and resumed.
 585          */
 586         (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
 587             "pm-hardware-state", "needs-suspend-resume");
 588 
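             /*
              * Register a single power component with two levels,
              * 0 (spindle off) and 1 (spindle on).  If registration
              * succeeds, raise the power level to spin the disk up and
              * mark power management as enabled for this instance.
              */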
 589         if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
 590             "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
 591                 if (pm_raise_power(dip, 0, CMDK_SPINDLE_ON) == DDI_SUCCESS) {
 592                         mutex_enter(&dkp->dk_pm_mutex);
 593                         dkp->dk_pm_level = CMDK_SPINDLE_ON;
 594                         dkp->dk_pm_is_enabled = 1;
 595                         mutex_exit(&dkp->dk_pm_mutex);
 596                 } else {
 597                         mutex_enter(&dkp->dk_pm_mutex);
 598                         dkp->dk_pm_level = CMDK_SPINDLE_OFF;
 599                         dkp->dk_pm_is_enabled = 0;
 600                         mutex_exit(&dkp->dk_pm_mutex);
 601                 }
 602         } else {
 603                 mutex_enter(&dkp->dk_pm_mutex);
 604                 dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
 605                 dkp->dk_pm_is_enabled = 0;
 606                 mutex_exit(&dkp->dk_pm_mutex);
 607         }
 608 }
 609 
 610 /*
 611  * Suspend routine; it runs when detach(9E) receives the DDI_SUSPEND
 612  * command from system power management.
 613  */
 614 static int
 615 cmdksuspend(dev_info_t *dip)
 616 {
 617         struct cmdk     *dkp;
 618         int             instance;
 619         clock_t         count = 0;
 620 
 621         instance = ddi_get_instance(dip);
 622         if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
 623                 return (DDI_FAILURE);
 624         mutex_enter(&dkp->dk_mutex);
 625         if (dkp->dk_flag & CMDK_SUSPEND) {
 626                 mutex_exit(&dkp->dk_mutex);
 627                 return (DDI_SUCCESS);
 628         }
 629         dkp->dk_flag |= CMDK_SUSPEND;
 630 
 631         /* wait (up to about 60 seconds) for outstanding commands to drain */
 632         while (dadk_getcmds(DKTP_DATA) != 0) {
 633                 delay(drv_usectohz(1000000));
 634                 if (count > 60) {
 635                         dkp->dk_flag &= ~CMDK_SUSPEND;
 636                         cv_broadcast(&dkp->dk_suspend_cv);
 637                         mutex_exit(&dkp->dk_mutex);
 638                         return (DDI_FAILURE);
 639                 }
 640                 count++;
 641         }
 642         mutex_exit(&dkp->dk_mutex);
 643         return (DDI_SUCCESS);
 644 }
 645 
 646 /*
 647  * Resume routine; it runs when attach(9E) receives the DDI_RESUME
 648  * command from system power management.
 649  */
 650 static int
 651 cmdkresume(dev_info_t *dip)
 652 {
 653         struct cmdk     *dkp;
 654         int             instance;
 655 
 656         instance = ddi_get_instance(dip);
 657         if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
 658                 return (DDI_FAILURE);
 659         mutex_enter(&dkp->dk_mutex);
 660         if (!(dkp->dk_flag & CMDK_SUSPEND)) {
 661                 mutex_exit(&dkp->dk_mutex);
 662                 return (DDI_FAILURE);
 663         }
 664         dkp->dk_pm_level = CMDK_SPINDLE_ON;
 665         dkp->dk_flag &= ~CMDK_SUSPEND;
 666         cv_broadcast(&dkp->dk_suspend_cv);
 667         mutex_exit(&dkp->dk_mutex);
 668         return (DDI_SUCCESS);
 669 
 670 }
 671 
 672 /*
 673  * Power management entry point; it is used to change the power
 674  * level of the power management component.
 675  * The real hard drive suspend/resume is handled in the ata
 676  * driver, so this function does no real work other than
 677  * verifying that the disk is idle before allowing the spindle
 678  * to be powered off.
 679  */
 680 static int
 681 cmdkpower(dev_info_t *dip, int component, int level)
 682 {
 683         struct cmdk     *dkp;
 684         int             instance;
 685 
 686         instance = ddi_get_instance(dip);
 687         if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
 688             component != 0 || level > CMDK_SPINDLE_ON ||
 689             level < CMDK_SPINDLE_OFF) {
 690                 return (DDI_FAILURE);
 691         }
 692 
 693         mutex_enter(&dkp->dk_pm_mutex);
 694         if (dkp->dk_pm_is_enabled && dkp->dk_pm_level == level) {
 695                 mutex_exit(&dkp->dk_pm_mutex);
 696                 return (DDI_SUCCESS);
 697         }
 698         mutex_exit(&dkp->dk_pm_mutex);
 699 
 700         if ((level == CMDK_SPINDLE_OFF) &&
 701             (dadk_getcmds(DKTP_DATA) != 0)) {
 702                 return (DDI_FAILURE);
 703         }
 704 
 705         mutex_enter(&dkp->dk_pm_mutex);
 706         dkp->dk_pm_level = level;
 707         mutex_exit(&dkp->dk_pm_mutex);
 708         return (DDI_SUCCESS);
 709 }
 710 
 711 static int
 712 cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
 713     char *name, caddr_t valuep, int *lengthp)
 714 {
 715         struct  cmdk    *dkp;
 716 
 717 #ifdef CMDK_DEBUG
 718         if (cmdk_debug & DENT)
 719                 PRF("cmdk_prop_op: call\n");
 720 #endif
 721 
 722         dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
 723         if (dkp == NULL)
 724                 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
 725                     name, valuep, lengthp));
 726 
 727         return (cmlb_prop_op(dkp->dk_cmlbhandle,
 728             dev, dip, prop_op, mod_flags, name, valuep, lengthp,
 729             CMDKPART(dev), NULL));
 730 }
 731 
 732 /*
 733  * dump routine
 734  */
 735 static int
 736 cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
 737 {
 738         int             instance;
 739         struct  cmdk    *dkp;
 740         diskaddr_t      p_lblksrt;
 741         diskaddr_t      p_lblkcnt;
 742         struct  buf     local;
 743         struct  buf     *bp;
 744 
 745 #ifdef CMDK_DEBUG
 746         if (cmdk_debug & DENT)
 747                 PRF("cmdkdump: call\n");
 748 #endif
 749         instance = CMDKUNIT(dev);
 750         if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
 751                 return (ENXIO);
 752 
 753         if (cmlb_partinfo(
 754             dkp->dk_cmlbhandle,
 755             CMDKPART(dev),
 756             &p_lblkcnt,
 757             &p_lblksrt,
 758             NULL,
 759             NULL,
 760             0)) {
 761                 return (ENXIO);
 762         }
 763 
 764         if ((blkno+nblk) > p_lblkcnt)
 765                 return (EINVAL);
 766 
 767         cmdk_indump = 1;        /* Tell disk targets we are panic dumping */
 768 
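             /*
              * Build a minimal buf(9S) on the stack describing the dump
              * transfer: nblk 512-byte sectors, with the partition-relative
              * blkno translated to an absolute sector using the partition
              * start (p_lblksrt).
              */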
 769         bp = &local;
 770         bzero(bp, sizeof (*bp));
 771         bp->b_flags = B_BUSY;
 772         bp->b_un.b_addr = addr;
 773         bp->b_bcount = nblk << SCTRSHFT;
 774         SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));
 775 
 776         (void) dadk_dump(DKTP_DATA, bp);
 777         return (bp->b_error);
 778 }
 779 
 780 /*
 781  * Copy in the dadkio_rwcmd according to the user's data model.  If needed,
 782  * convert it for our internal use.
 783  */
 784 static int
 785 rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
 786 {
 787         switch (ddi_model_convert_from(flag)) {
 788                 case DDI_MODEL_ILP32: {
 789                         struct dadkio_rwcmd32 cmd32;
 790 
 791                         if (ddi_copyin(inaddr, &cmd32,
 792                             sizeof (struct dadkio_rwcmd32), flag)) {
 793                                 return (EFAULT);
 794                         }
 795 
 796                         rwcmdp->cmd = cmd32.cmd;
 797                         rwcmdp->flags = cmd32.flags;
 798                         rwcmdp->blkaddr = (blkaddr_t)cmd32.blkaddr;
 799                         rwcmdp->buflen = cmd32.buflen;
 800                         rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
 801                         /*
 802                          * Note: we do not convert the 'status' field,
 803                          * as it should not contain valid data at this
 804                          * point.
 805                          */
 806                         bzero(&rwcmdp->status, sizeof (rwcmdp->status));
 807                         break;
 808                 }
 809                 case DDI_MODEL_NONE: {
 810                         if (ddi_copyin(inaddr, rwcmdp,
 811                             sizeof (struct dadkio_rwcmd), flag)) {
 812                                 return (EFAULT);
 813                         }
 814                 }
 815         }
 816         return (0);
 817 }
 818 
 819 /*
 820  * If necessary, convert the internal rwcmdp and status to the appropriate
 821  * data model and copy it out to the user.
 822  */
 823 static int
 824 rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag)
 825 {
 826         switch (ddi_model_convert_from(flag)) {
 827                 case DDI_MODEL_ILP32: {
 828                         struct dadkio_rwcmd32 cmd32;
 829 
 830                         cmd32.cmd = rwcmdp->cmd;
 831                         cmd32.flags = rwcmdp->flags;
 832                         cmd32.blkaddr = rwcmdp->blkaddr;
 833                         cmd32.buflen = rwcmdp->buflen;
 834                         ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0);
 835                         cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr;
 836 
 837                         cmd32.status.status = rwcmdp->status.status;
 838                         cmd32.status.resid = rwcmdp->status.resid;
 839                         cmd32.status.failed_blk_is_valid =
 840                             rwcmdp->status.failed_blk_is_valid;
 841                         cmd32.status.failed_blk = rwcmdp->status.failed_blk;
 842                         cmd32.status.fru_code_is_valid =
 843                             rwcmdp->status.fru_code_is_valid;
 844                         cmd32.status.fru_code = rwcmdp->status.fru_code;
 845 
 846                         bcopy(rwcmdp->status.add_error_info,
 847                             cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN);
 848 
 849                         if (ddi_copyout(&cmd32, outaddr,
 850                             sizeof (struct dadkio_rwcmd32), flag))
 851                                 return (EFAULT);
 852                         break;
 853                 }
 854                 case DDI_MODEL_NONE: {
 855                         if (ddi_copyout(rwcmdp, outaddr,
 856                             sizeof (struct dadkio_rwcmd), flag))
 857                                 return (EFAULT);
 858                 }
 859         }
 860         return (0);
 861 }
 862 
 863 /*
 864  * ioctl routine
 865  */
 866 static int
 867 cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
 868 {
 869         int             instance;
 870         struct scsi_device *devp;
 871         struct cmdk     *dkp;
 872         char            data[NBPSCTR];
 873 
 874         instance = CMDKUNIT(dev);
 875         if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
 876                 return (ENXIO);
 877 
 878         mutex_enter(&dkp->dk_mutex);
 879         while (dkp->dk_flag & CMDK_SUSPEND) {
 880                 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
 881         }
 882         mutex_exit(&dkp->dk_mutex);
 883 
 884         bzero(data, sizeof (data));
 885 
 886         switch (cmd) {
 887 
 888         case DKIOCGMEDIAINFO: {
 889                 struct dk_minfo media_info;
 890                 struct  tgdk_geom phyg;
 891 
 892                 /* dadk_getphygeom always returns success */
 893                 (void) dadk_getphygeom(DKTP_DATA, &phyg);
 894 
 895                 media_info.dki_lbsize = phyg.g_secsiz;
 896                 media_info.dki_capacity = phyg.g_cap;
 897                 media_info.dki_media_type = DK_FIXED_DISK;
 898 
 899                 if (ddi_copyout(&media_info, (void *)arg,
 900                     sizeof (struct dk_minfo), flag)) {
 901                         return (EFAULT);
 902                 } else {
 903                         return (0);
 904                 }
 905         }
 906 
 907         case DKIOCINFO: {
 908                 struct dk_cinfo *info = (struct dk_cinfo *)data;
 909 
 910                 /* controller information */
 911                 info->dki_ctype = (DKTP_EXT->tg_ctype);
 912                 info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip));
 913                 (void) strcpy(info->dki_cname,
 914                     ddi_get_name(ddi_get_parent(dkp->dk_dip)));
 915 
 916                 /* Unit Information */
 917                 info->dki_unit = ddi_get_instance(dkp->dk_dip);
 918                 devp = ddi_get_driver_private(dkp->dk_dip);
 919                 info->dki_slave = (CMDEV_TARG(devp)<<3) | CMDEV_LUN(devp);
 920                 (void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip));
 921                 info->dki_flags = DKI_FMTVOL;
 922                 info->dki_partition = CMDKPART(dev);
 923 
 924                 info->dki_maxtransfer = maxphys / DEV_BSIZE;
 925                 info->dki_addr = 1;
 926                 info->dki_space = 0;
 927                 info->dki_prio = 0;
 928                 info->dki_vec = 0;
 929 
 930                 if (ddi_copyout(data, (void *)arg, sizeof (*info), flag))
 931                         return (EFAULT);
 932                 else
 933                         return (0);
 934         }
 935 
 936         case DKIOCSTATE: {
 937                 int     state;
 938                 int     rval;
 939                 diskaddr_t      p_lblksrt;
 940                 diskaddr_t      p_lblkcnt;
 941 
 942                 if (ddi_copyin((void *)arg, &state, sizeof (int), flag))
 943                         return (EFAULT);
 944 
 945                 /* dadk_check_media blocks until state changes */
 946                 if (rval = dadk_check_media(DKTP_DATA, &state))
 947                         return (rval);
 948 
 949                 if (state == DKIO_INSERTED) {
 950 
 951                         if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0)
 952                                 return (ENXIO);
 953 
 954                         if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev),
 955                             &p_lblkcnt, &p_lblksrt, NULL, NULL, 0))
 956                                 return (ENXIO);
 957 
 958                         if (p_lblkcnt <= 0)
 959                                 return (ENXIO);
 960                 }
 961 
 962                 if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag))
 963                         return (EFAULT);
 964 
 965                 return (0);
 966         }
 967 
 968         /*
 969          * is media removable?
 970          */
 971         case DKIOCREMOVABLE: {
 972                 int i;
 973 
 974                 i = (DKTP_EXT->tg_rmb) ? 1 : 0;
 975 
 976                 if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag))
 977                         return (EFAULT);
 978 
 979                 return (0);
 980         }
 981 
 982         case DKIOCADDBAD:
 983                 /*
 984                  * This is not an update mechanism to add bad blocks
 985                  * to the bad block structures stored on disk.
 986                  *
 987                  * addbadsec(1M) will update the bad block data on disk
 988                  * and use this ioctl to force the driver to re-initialize
 989                  * the list of bad blocks in the driver.
 990                  */
 991 
 992                 /* start BBH */
 993                 cmdk_bbh_reopen(dkp);
 994                 return (0);
 995 
 996         case DKIOCG_PHYGEOM:
 997         case DKIOCG_VIRTGEOM:
 998         case DKIOCGGEOM:
 999         case DKIOCSGEOM:
1000         case DKIOCGAPART:
1001         case DKIOCSAPART:
1002         case DKIOCGVTOC:
1003         case DKIOCSVTOC:
1004         case DKIOCPARTINFO:
1005         case DKIOCGEXTVTOC:
1006         case DKIOCSEXTVTOC:
1007         case DKIOCEXTPARTINFO:
1008         case DKIOCGMBOOT:
1009         case DKIOCSMBOOT:
1010         case DKIOCGETEFI:
1011         case DKIOCSETEFI:
1012         case DKIOCPARTITION:
1013         case DKIOCSETEXTPART:
1014         {
1015                 int rc;
1016 
1017                 rc = cmlb_ioctl(dkp->dk_cmlbhandle, dev, cmd, arg, flag,
1018                     credp, rvalp, 0);
1019                 if (cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC)
1020                         cmdk_devid_setup(dkp);
1021                 return (rc);
1022         }
1023 
1024         case DIOCTL_RWCMD: {
1025                 struct  dadkio_rwcmd *rwcmdp;
1026                 int     status;
1027 
1028                 rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP);
1029 
1030                 status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag);
1031 
1032                 if (status == 0) {
1033                         bzero(&(rwcmdp->status), sizeof (struct dadkio_status));
1034                         status = dadk_ioctl(DKTP_DATA,
1035                             dev,
1036                             cmd,
1037                             (uintptr_t)rwcmdp,
1038                             flag,
1039                             credp,
1040                             rvalp);
1041                 }
1042                 if (status == 0)
1043                         status = rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);
1044 
1045                 kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
1046                 return (status);
1047         }
1048 
1049         default:
1050                 return (dadk_ioctl(DKTP_DATA,
1051                     dev,
1052                     cmd,
1053                     arg,
1054                     flag,
1055                     credp,
1056                     rvalp));
1057         }
1058 }
1059 
1060 /*ARGSUSED1*/
1061 static int
1062 cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
1063 {
1064         int             part;
1065         ulong_t         partbit;
1066         int             instance;
1067         struct cmdk     *dkp;
1068         int             lastclose = 1;
1069         int             i;
1070 
1071         instance = CMDKUNIT(dev);
1072         if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
1073             (otyp >= OTYPCNT))
1074                 return (ENXIO);
1075 
1076         mutex_enter(&dkp->dk_mutex);
1077 
1078         /* check if device has been opened */
1079         ASSERT(cmdk_isopen(dkp, dev));
1080         if (!(dkp->dk_flag & CMDK_OPEN)) {
1081                 mutex_exit(&dkp->dk_mutex);
1082                 return (ENXIO);
1083         }
1084 
1085         while (dkp->dk_flag & CMDK_SUSPEND) {
1086                 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1087         }
1088 
1089         part = CMDKPART(dev);
1090         partbit = 1 << part;
1091 
1092         /* account for close */
1093         if (otyp == OTYP_LYR) {
1094                 ASSERT(dkp->dk_open_lyr[part] > 0);
1095                 if (dkp->dk_open_lyr[part])
1096                         dkp->dk_open_lyr[part]--;
1097         } else {
1098                 ASSERT((dkp->dk_open_reg[otyp] & partbit) != 0);
1099                 dkp->dk_open_reg[otyp] &= ~partbit;
1100         }
1101         dkp->dk_open_exl &= ~partbit;
1102 
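             /*
              * Determine whether this was the last close of the device:
              * no layered opens remain on any partition and no regular
              * opens of any type remain.
              */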
1103         for (i = 0; i < CMDK_MAXPART; i++)
1104                 if (dkp->dk_open_lyr[i] != 0) {
1105                         lastclose = 0;
1106                         break;
1107                 }
1108 
1109         if (lastclose)
1110                 for (i = 0; i < OTYPCNT; i++)
1111                         if (dkp->dk_open_reg[i] != 0) {
1112                                 lastclose = 0;
1113                                 break;
1114                         }
1115 
1116         mutex_exit(&dkp->dk_mutex);
1117 
1118         if (lastclose)
1119                 cmlb_invalidate(dkp->dk_cmlbhandle, 0);
1120 
1121         return (DDI_SUCCESS);
1122 }
1123 
1124 /*ARGSUSED3*/
1125 static int
1126 cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
1127 {
1128         dev_t           dev = *dev_p;
1129         int             part;
1130         ulong_t         partbit;
1131         int             instance;
1132         struct  cmdk    *dkp;
1133         diskaddr_t      p_lblksrt;
1134         diskaddr_t      p_lblkcnt;
1135         int             i;
1136         int             nodelay;
1137 
1138         instance = CMDKUNIT(dev);
1139         if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
1140                 return (ENXIO);
1141 
1142         if (otyp >= OTYPCNT)
1143                 return (EINVAL);
1144 
1145         mutex_enter(&dkp->dk_mutex);
1146         while (dkp->dk_flag & CMDK_SUSPEND) {
1147                 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1148         }
1149         mutex_exit(&dkp->dk_mutex);
1150 
1151         part = CMDKPART(dev);
1152         partbit = 1 << part;
1153         nodelay = (flag & (FNDELAY | FNONBLOCK));
1154 
1155         mutex_enter(&dkp->dk_mutex);
1156 
1157         if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) {
1158 
1159                 /* fail unless this is a non-blocking (FNDELAY/FNONBLOCK) open */
1160                 if (!nodelay) {
1161                         mutex_exit(&dkp->dk_mutex);
1162                         return (ENXIO);
1163                 }
1164         } else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
1165             &p_lblksrt, NULL, NULL, 0) == 0) {
1166 
1167                 if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
1168                         mutex_exit(&dkp->dk_mutex);
1169                         return (ENXIO);
1170                 }
1171         } else {
1172                 /* fail unless this is a non-blocking (FNDELAY/FNONBLOCK) open */
1173                 if (!nodelay) {
1174                         mutex_exit(&dkp->dk_mutex);
1175                         return (ENXIO);
1176                 }
1177         }
1178 
1179         if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
1180                 mutex_exit(&dkp->dk_mutex);
1181                 return (EROFS);
1182         }
1183 
1184         /* check for part already opened exclusively */
1185         if (dkp->dk_open_exl & partbit)
1186                 goto excl_open_fail;
1187 
1188         /* check if we can establish exclusive open */
1189         if (flag & FEXCL) {
1190                 if (dkp->dk_open_lyr[part])
1191                         goto excl_open_fail;
1192                 for (i = 0; i < OTYPCNT; i++) {
1193                         if (dkp->dk_open_reg[i] & partbit)
1194                                 goto excl_open_fail;
1195                 }
1196         }
1197 
1198         /* open will succeed, account for open */
1199         dkp->dk_flag |= CMDK_OPEN;
1200         if (otyp == OTYP_LYR)
1201                 dkp->dk_open_lyr[part]++;
1202         else
1203                 dkp->dk_open_reg[otyp] |= partbit;
1204         if (flag & FEXCL)
1205                 dkp->dk_open_exl |= partbit;
1206 
1207         mutex_exit(&dkp->dk_mutex);
1208         return (DDI_SUCCESS);
1209 
1210 excl_open_fail:
1211         mutex_exit(&dkp->dk_mutex);
1212         return (EBUSY);
1213 }
1214 
1215 /*
1216  * read routine
1217  */
1218 /*ARGSUSED2*/
1219 static int
1220 cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
1221 {
1222         return (cmdkrw(dev, uio, B_READ));
1223 }
1224 
1225 /*
1226  * async read routine
1227  */
1228 /*ARGSUSED2*/
1229 static int
1230 cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
1231 {
1232         return (cmdkarw(dev, aio, B_READ));
1233 }
1234 
1235 /*
1236  * write routine
1237  */
1238 /*ARGSUSED2*/
1239 static int
1240 cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
1241 {
1242         return (cmdkrw(dev, uio, B_WRITE));
1243 }
1244 
1245 /*
1246  * async write routine
1247  */
1248 /*ARGSUSED2*/
1249 static int
1250 cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
1251 {
1252         return (cmdkarw(dev, aio, B_WRITE));
1253 }
1254 
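     /*
      * minphys(9F)-style routine handed to physio/aphysio below; it
      * clamps each transfer to DK_MAXRECSIZE.
      */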
1255 static void
1256 cmdkmin(struct buf *bp)
1257 {
1258         if (bp->b_bcount > DK_MAXRECSIZE)
1259                 bp->b_bcount = DK_MAXRECSIZE;
1260 }
1261 
1262 static int
1263 cmdkrw(dev_t dev, struct uio *uio, int flag)
1264 {
1265         int             instance;
1266         struct  cmdk    *dkp;
1267 
1268         instance = CMDKUNIT(dev);
1269         if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
1270                 return (ENXIO);
1271 
1272         mutex_enter(&dkp->dk_mutex);
1273         while (dkp->dk_flag & CMDK_SUSPEND) {
1274                 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1275         }
1276         mutex_exit(&dkp->dk_mutex);
1277 
1278         return (physio(cmdkstrategy, (struct buf *)0, dev, flag, cmdkmin, uio));
1279 }
1280 
1281 static int
1282 cmdkarw(dev_t dev, struct aio_req *aio, int flag)
1283 {
1284         int             instance;
1285         struct  cmdk    *dkp;
1286 
1287         instance = CMDKUNIT(dev);
1288         if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
1289                 return (ENXIO);
1290 
1291         mutex_enter(&dkp->dk_mutex);
1292         while (dkp->dk_flag & CMDK_SUSPEND) {
1293                 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1294         }
1295         mutex_exit(&dkp->dk_mutex);
1296 
1297         return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
1298 }
1299 
1300 /*
1301  * strategy routine
1302  */
1303 static int
1304 cmdkstrategy(struct buf *bp)
1305 {
1306         int             instance;
1307         struct  cmdk    *dkp;
1308         long            d_cnt;
1309         diskaddr_t      p_lblksrt;
1310         diskaddr_t      p_lblkcnt;
1311 
1312         instance = CMDKUNIT(bp->b_edev);
1313         if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
1314             (dkblock(bp) < 0)) {
1315                 bp->b_resid = bp->b_bcount;
1316                 SETBPERR(bp, ENXIO);
1317                 biodone(bp);
1318                 return (0);
1319         }
1320 
1321         mutex_enter(&dkp->dk_mutex);
1322         ASSERT(cmdk_isopen(dkp, bp->b_edev));
1323         while (dkp->dk_flag & CMDK_SUSPEND) {
1324                 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1325         }
1326         mutex_exit(&dkp->dk_mutex);
1327 
1328         bp->b_flags &= ~(B_DONE|B_ERROR);
1329         bp->b_resid = 0;
1330         bp->av_back = NULL;
1331 
1332         /*
1333          * ask cmlb for the partition's start block and size
1334          */
1335         if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(bp->b_edev),
1336             &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) {
1337                 SETBPERR(bp, ENXIO);
1338         }
1339 
1340         if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt))
1341                 SETBPERR(bp, ENXIO);
1342 
1343         if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) {
1344                 bp->b_resid = bp->b_bcount;
1345                 biodone(bp);
1346                 return (0);
1347         }
1348 
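             /*
              * If the request extends past the end of the partition, trim
              * b_bcount to the portion that fits and record the remainder
              * in b_resid.
              */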
1349         d_cnt = bp->b_bcount >> SCTRSHFT;
1350         if ((dkblock(bp) + d_cnt) > p_lblkcnt) {
1351                 bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT;
1352                 bp->b_bcount -= bp->b_resid;
1353         }
1354 
1355         SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp))));
1356         if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) {
1357                 bp->b_resid += bp->b_bcount;
1358                 biodone(bp);
1359         }
1360         return (0);
1361 }
1362 
1363 static int
1364 cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp)
1365 {
1366         struct scsi_device *devp;
1367         opaque_t        queobjp = NULL;
1368         opaque_t        flcobjp = NULL;
1369         char            que_keyvalp[64];
1370         int             que_keylen;
1371         char            flc_keyvalp[64];
1372         int             flc_keylen;
1373 
1374         ASSERT(mutex_owned(&dkp->dk_mutex));
1375 
1376         /* Create linkage to queueing routines based on property */
1377         que_keylen = sizeof (que_keyvalp);
1378         if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
1379             DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
1380             DDI_PROP_SUCCESS) {
1381                 cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined");
1382                 return (DDI_FAILURE);
1383         }
1384         que_keyvalp[que_keylen] = (char)0;
1385 
1386         if (strcmp(que_keyvalp, "qfifo") == 0) {
1387                 queobjp = (opaque_t)qfifo_create();
1388         } else if (strcmp(que_keyvalp, "qsort") == 0) {
1389                 queobjp = (opaque_t)qsort_create();
1390         } else {
1391                 return (DDI_FAILURE);
1392         }
1393 
1394         /* Create linkage to dequeueing routines based on property */
1395         flc_keylen = sizeof (flc_keyvalp);
1396         if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
1397             DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
1398             DDI_PROP_SUCCESS) {
1399                 cmn_err(CE_WARN,
1400                     "cmdk_create_obj: flow-control property undefined");
1401                 return (DDI_FAILURE);
1402         }
1403 
1404         flc_keyvalp[flc_keylen] = (char)0;
1405 
1406         if (strcmp(flc_keyvalp, "dsngl") == 0) {
1407                 flcobjp = (opaque_t)dsngl_create();
1408         } else if (strcmp(flc_keyvalp, "dmult") == 0) {
1409                 flcobjp = (opaque_t)dmult_create();
1410         } else {
1411                 return (DDI_FAILURE);
1412         }
1413 
1414         /* populate bbh_obj object stored in dkp */
1415         dkp->dk_bbh_obj.bbh_data = dkp;
1416         dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops;
1417 
1418         /* create linkage to dadk */
1419         dkp->dk_tgobjp = (opaque_t)dadk_create();
1420 
1421         devp = ddi_get_driver_private(dip);
1422         (void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj,
1423             NULL);
1424 
1425         return (DDI_SUCCESS);
1426 }
1427 
1428 static void
1429 cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp)
1430 {
1431         char            que_keyvalp[64];
1432         int             que_keylen;
1433         char            flc_keyvalp[64];
1434         int             flc_keylen;
1435 
1436         ASSERT(mutex_owned(&dkp->dk_mutex));
1437 
1438         (void) dadk_free((dkp->dk_tgobjp));
1439         dkp->dk_tgobjp = NULL;
1440 
1441         que_keylen = sizeof (que_keyvalp);
1442         if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
1443             DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
1444             DDI_PROP_SUCCESS) {
1445                 cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined");
1446                 return;
1447         }
1448         que_keyvalp[que_keylen] = (char)0;
1449 
1450         flc_keylen = sizeof (flc_keyvalp);
1451         if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
1452             DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
1453             DDI_PROP_SUCCESS) {
1454                 cmn_err(CE_WARN,
1455                     "cmdk_destroy_obj: flow-control property undefined");
1456                 return;
1457         }
1458         flc_keyvalp[flc_keylen] = (char)0;
1459 }
1460 /*ARGSUSED5*/
1461 static int
1462 cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
1463     diskaddr_t start, size_t count, void *tg_cookie)
1464 {
1465         struct cmdk     *dkp;
1466         opaque_t        handle;
1467         int             rc = 0;
1468         char            *bufa;
1469         size_t          buflen;
1470 
1471         dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
1472         if (dkp == NULL)
1473                 return (ENXIO);
1474 
1475         if (cmd != TG_READ && cmd != TG_WRITE)
1476                 return (EINVAL);
1477 
1478         /* round buflen up to a multiple of the 512-byte sector size */
1479         buflen = (count + NBPSCTR - 1) & -NBPSCTR;
1480         handle = dadk_iob_alloc(DKTP_DATA, start, buflen, KM_SLEEP);
1481         if (!handle)
1482                 return (ENOMEM);
1483 
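             /*
              * For a read, dadk_iob_xfer performs the transfer and returns
              * the kernel buffer from which to copy out to the caller; for
              * a write, copy the caller's data into the iob buffer first
              * and then issue the transfer.
              */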
1484         if (cmd == TG_READ) {
1485                 bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ);
1486                 if (!bufa)
1487                         rc = EIO;
1488                 else
1489                         bcopy(bufa, bufaddr, count);
1490         } else {
1491                 bufa = dadk_iob_htoc(DKTP_DATA, handle);
1492                 bcopy(bufaddr, bufa, count);
1493                 bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
1494                 if (!bufa)
1495                         rc = EIO;
1496         }
1497         (void) dadk_iob_free(DKTP_DATA, handle);
1498 
1499         return (rc);
1500 }
1501 
1502 /*ARGSUSED3*/
1503 static int
1504 cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
1505 {
1506 
1507         struct cmdk             *dkp;
1508         struct tgdk_geom        phyg;
1509 
1510 
1511         dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
1512         if (dkp == NULL)
1513                 return (ENXIO);
1514 
1515         switch (cmd) {
1516         case TG_GETPHYGEOM: {
1517                 cmlb_geom_t *phygeomp = (cmlb_geom_t *)arg;
1518 
1519                 /* dadk_getphygeom always returns success */
1520                 (void) dadk_getphygeom(DKTP_DATA, &phyg);
1521 
1522                 phygeomp->g_capacity = phyg.g_cap;
1523                 phygeomp->g_nsect    = phyg.g_sec;
1524                 phygeomp->g_nhead    = phyg.g_head;
1525                 phygeomp->g_acyl     = phyg.g_acyl;
1526                 phygeomp->g_ncyl     = phyg.g_cyl;
1527                 phygeomp->g_secsize  = phyg.g_secsiz;
1528                 phygeomp->g_intrlv   = 1;
1529                 phygeomp->g_rpm      = 3600;
1530 
1531                 return (0);
1532         }
1533 
1534         case TG_GETVIRTGEOM: {
1535                 cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg;
1536                 diskaddr_t              capacity;
1537 
1538                 (void) dadk_getgeom(DKTP_DATA, &phyg);
1539                 capacity = phyg.g_cap;
1540 
1541                 /*
1542                  * If the controller returned us something that doesn't
1543                  * really fit into an Int 13/function 8 geometry
1544                  * result, just fail the ioctl.  See PSARC 1998/313.
1545                  */
1546                 if (capacity < 0 || capacity >= 63 * 254 * 1024)
1547                         return (EINVAL);
1548 
1549                 virtgeomp->g_capacity = capacity;
1550                 virtgeomp->g_nsect   = 63;
1551                 virtgeomp->g_nhead   = 254;
1552                 virtgeomp->g_ncyl    = capacity / (63 * 254);
1553                 virtgeomp->g_acyl    = 0;
1554                 virtgeomp->g_secsize = 512;
1555                 virtgeomp->g_intrlv  = 1;
1556                 virtgeomp->g_rpm     = 3600;
1557 
1558                 return (0);
1559         }
1560 
1561         case TG_GETCAPACITY:
1562         case TG_GETBLOCKSIZE:
1563         {
1564 
1565                 /* dadk_getphygeom always returns success */
1566                 (void) dadk_getphygeom(DKTP_DATA, &phyg);
1567                 if (cmd == TG_GETCAPACITY)
1568                         *(diskaddr_t *)arg = phyg.g_cap;
1569                 else
1570                         *(uint32_t *)arg = (uint32_t)phyg.g_secsiz;
1571 
1572                 return (0);
1573         }
1574 
1575         case TG_GETATTR: {
1576                 tg_attribute_t *tgattribute = (tg_attribute_t *)arg;
1577                 if ((DKTP_EXT->tg_rdonly))
1578                         tgattribute->media_is_writable = FALSE;
1579                 else
1580                         tgattribute->media_is_writable = TRUE;
1581 
1582                 return (0);
1583         }
1584 
1585         default:
1586                 return (ENOTTY);
1587         }
1588 }
1589 
1590 
1591 
1592 
1593 
1594 /*
1595  * Create and register the devid.
1596  * There are 4 different ways we can get a device id:
1597  *    1. Already have one - nothing to do
1598  *    2. Build one from the drive's model and serial numbers
1599  *    3. Read one from the disk (first sector of last track)
1600  *    4. Fabricate one and write it on the disk.
1601  * If any of these succeeds, register the device id
1602  */
1603 static void
1604 cmdk_devid_setup(struct cmdk *dkp)
1605 {
1606         int     rc;
1607 
1608         /* Try options until one succeeds, or all have failed */
1609 
1610         /* 1. All done if already registered */
1611         if (dkp->dk_devid != NULL)
1612                 return;
1613 
1614         /* 2. Build a devid from the model and serial number */
1615         rc = cmdk_devid_modser(dkp);
1616         if (rc != DDI_SUCCESS) {
1617                 /* 3. Read devid from the disk, if present */
1618                 rc = cmdk_devid_read(dkp);
1619 
1620                 /* 4. otherwise make one up and write it on the disk */
1621                 if (rc != DDI_SUCCESS)
1622                         rc = cmdk_devid_fabricate(dkp);
1623         }
1624 
1625         /* If we managed to get a devid any of the above ways, register it */
1626         if (rc == DDI_SUCCESS)
1627                 (void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid);
1628 
1629 }
1630 
1631 /*
1632  * Build a devid from the model and serial number
1633  * Return DDI_SUCCESS or DDI_FAILURE.
1634  */
1635 static int
1636 cmdk_devid_modser(struct cmdk *dkp)
1637 {
1638         int     rc = DDI_FAILURE;
1639         char    *hwid;
1640         int     modlen;
1641         int     serlen;
1642 
1643         /*
1644          * device ID is a concatenation of model number, '=', serial number.
1645          */
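             /*
              * For example (hypothetical values): a drive reporting model
              * "ST31000528AS" and serial "5VP8ABCD" yields the hwid string
              * "ST31000528AS=5VP8ABCD".
              */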
1646         hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP);
1647         modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN);
1648         if (modlen == 0) {
1649                 rc = DDI_FAILURE;
1650                 goto err;
1651         }
1652         hwid[modlen++] = '=';
1653         serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL,
1654             hwid + modlen, CMDK_HWIDLEN - modlen);
1655         if (serlen == 0) {
1656                 rc = DDI_FAILURE;
1657                 goto err;
1658         }
1659         hwid[modlen + serlen] = 0;
1660 
1661         /* Initialize the device ID; the trailing NUL is not included */
1662         rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen,
1663             hwid, &dkp->dk_devid);
1664         if (rc != DDI_SUCCESS) {
1665                 rc = DDI_FAILURE;
1666                 goto err;
1667         }
1668 
1669         rc = DDI_SUCCESS;
1670 
1671 err:
1672         kmem_free(hwid, CMDK_HWIDLEN);
1673         return (rc);
1674 }
1675 
1676 static int
1677 cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len)
1678 {
1679         dadk_ioc_string_t strarg;
1680         int             rval;
1681         char            *s;
1682         char            ch;
1683         boolean_t       ret;
1684         int             i;
1685         int             tb;
1686 
1687         strarg.is_buf = buf;
1688         strarg.is_size = len;
1689         if (dadk_ioctl(DKTP_DATA,
1690             dkp->dk_dev,
1691             ioccmd,
1692             (uintptr_t)&strarg,
1693             FNATIVE | FKIOCTL,
1694             NULL,
1695             &rval) != 0)
1696                 return (0);
1697 
1698         /*
1699          * A valid model/serial string must contain at least one character
1700          * that is not a space, NUL, or '0'; also trim trailing spaces/NULs.
1701          */
1702         ret = B_FALSE;
1703         s = buf;
1704         for (i = 0; i < strarg.is_size; i++) {
1705                 ch = *s++;
1706                 if (ch != ' ' && ch != '\0')
1707                         tb = i + 1;
1708                 if (ch != ' ' && ch != '\0' && ch != '0')
1709                         ret = B_TRUE;
1710         }
1711 
1712         if (ret == B_FALSE)
1713                 return (0);
1714 
1715         return (tb);
1716 }
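
     /*
      * Illustration (hypothetical contents): if the scanned buffer holds the
      * model string "WD1600BEVT" followed only by trailing spaces and NULs,
      * the loop above returns tb = 10, the length with the padding trimmed;
      * a buffer containing nothing but spaces, NULs and '0' characters makes
      * the string invalid and the function returns 0.
      */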
1717 
1718 /*
1719  * Read a devid from the first block of the last track of
1720  * the last cylinder.  Make sure what we read is a valid devid.
1721  * Return DDI_SUCCESS or DDI_FAILURE.
1722  */
1723 static int
1724 cmdk_devid_read(struct cmdk *dkp)
1725 {
1726         diskaddr_t      blk;
1727         struct dk_devid *dkdevidp;
1728         uint_t          *ip;
1729         int             chksum;
1730         int             i, sz;
1731         tgdk_iob_handle handle = NULL;
1732         int             rc = DDI_FAILURE;
1733 
1734         if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0))
1735                 goto err;
1736 
1737         /* read the devid */
1738         handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
1739         if (handle == NULL)
1740                 goto err;
1741 
1742         dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
1743         if (dkdevidp == NULL)
1744                 goto err;
1745 
1746         /* Validate the revision */
1747         if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
1748             (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
1749                 goto err;
1750 
1751         /* Calculate the checksum */
1752         chksum = 0;
1753         ip = (uint_t *)dkdevidp;
1754         for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
1755                 chksum ^= ip[i];
1756         if (DKD_GETCHKSUM(dkdevidp) != chksum)
1757                 goto err;
1758 
1759         /* Validate the device id */
1760         if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS)
1761                 goto err;
1762 
1763         /* keep a copy of the device id */
1764         sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid);
1765         dkp->dk_devid = kmem_alloc(sz, KM_SLEEP);
1766         bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz);
1767 
1768         rc = DDI_SUCCESS;
1769 
1770 err:
1771         if (handle != NULL)
1772                 (void) dadk_iob_free(DKTP_DATA, handle);
1773         return (rc);
1774 }
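
     /*
      * A minimal sketch of the XOR checksum used above and again in
      * cmdk_devid_fabricate(): every 32-bit word of the sector except the
      * final one, where the checksum itself is kept, is XORed together.
      * Illustration only, not driver code; devid_sector_checksum is a
      * hypothetical name and NBPSCTR is assumed to be 512:
      *
      *     uint32_t
      *     devid_sector_checksum(const uint32_t sect[512 / 4])
      *     {
      *             uint32_t sum = 0;
      *             int      i;
      *
      *             for (i = 0; i < (512 - 4) / 4; i++)
      *                     sum ^= sect[i];
      *             return (sum);
      *     }
      */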
1775 
1776 /*
1777  * Create a devid and write it on the first block of the last track of
1778  * the last cylinder.
1779  * Return DDI_SUCCESS or DDI_FAILURE.
1780  */
1781 static int
1782 cmdk_devid_fabricate(struct cmdk *dkp)
1783 {
1784         ddi_devid_t     devid = NULL;   /* devid made by ddi_devid_init  */
1785         struct dk_devid *dkdevidp;      /* devid struct stored on disk */
1786         diskaddr_t      blk;
1787         tgdk_iob_handle handle = NULL;
1788         uint_t          *ip, chksum;
1789         int             i;
1790         int             rc = DDI_FAILURE;
1791 
1792         if (ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid) !=
1793             DDI_SUCCESS)
1794                 goto err;
1795 
1796         if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) {
1797                 /* no device id block address */
1798                 goto err;
1799         }
1800 
1801         handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
1802         if (!handle)
1803                 goto err;
1804 
1805         /* Locate the buffer */
1806         dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle);
1807 
1808         /* Fill in the revision */
1809         bzero(dkdevidp, NBPSCTR);
1810         dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB;
1811         dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB;
1812 
1813         /* Copy in the device id */
1814         i = ddi_devid_sizeof(devid);
1815         if (i > DK_DEVID_SIZE)
1816                 goto err;
1817         bcopy(devid, dkdevidp->dkd_devid, i);
1818 
1819         /* Calculate the chksum */
1820         chksum = 0;
1821         ip = (uint_t *)dkdevidp;
1822         for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
1823                 chksum ^= ip[i];
1824 
1825         /* Fill in the checksum */
1826         DKD_FORMCHKSUM(chksum, dkdevidp);
1827 
1828         /* write the devid */
1829         (void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
1830 
1831         dkp->dk_devid = devid;
1832 
1833         rc = DDI_SUCCESS;
1834 
1835 err:
1836         if (handle != NULL)
1837                 (void) dadk_iob_free(DKTP_DATA, handle);
1838 
1839         if (rc != DDI_SUCCESS && devid != NULL)
1840                 ddi_devid_free(devid);
1841 
1842         return (rc);
1843 }
1844 
1845 static void
1846 cmdk_bbh_free_alts(struct cmdk *dkp)
1847 {
1848         if (dkp->dk_alts_hdl) {
1849                 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
1850                 kmem_free(dkp->dk_slc_cnt,
1851                     NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *)));
1852                 dkp->dk_alts_hdl = NULL;
1853         }
1854 }
1855 
1856 static void
1857 cmdk_bbh_reopen(struct cmdk *dkp)
1858 {
1859         tgdk_iob_handle         handle = NULL;
1860         diskaddr_t              slcb, slcn, slce;
1861         struct  alts_parttbl    *ap;
1862         struct  alts_ent        *enttblp;
1863         uint32_t                altused;
1864         uint32_t                altbase;
1865         uint32_t                altlast;
1866         int                     alts;
1867         uint16_t                vtoctag;
1868         int                     i, j;
1869 
1870         /* find slice with V_ALTSCTR tag */
1871         for (alts = 0; alts < NDKMAP; alts++) {
1872                 if (cmlb_partinfo(
1873                     dkp->dk_cmlbhandle,
1874                     alts,
1875                     &slcn,
1876                     &slcb,
1877                     NULL,
1878                     &vtoctag,
1879                     0)) {
1880                         goto empty;     /* no partition table exists */
1881                 }
1882 
1883                 if (vtoctag == V_ALTSCTR && slcn > 1)
1884                         break;
1885         }
1886         if (alts >= NDKMAP) {
1887                 goto empty;     /* no V_ALTSCTR slice defined */
1888         }
1889 
1890         /* read in ALTS label block */
1891         handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP);
1892         if (!handle) {
1893                 goto empty;
1894         }
1895 
1896         ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
1897         if (!ap || (ap->alts_sanity != ALTS_SANITY)) {
1898                 goto empty;
1899         }
1900 
1901         altused = ap->alts_ent_used; /* number of BB entries */
1902         altbase = ap->alts_ent_base; /* blk offset from begin slice */
1903         altlast = ap->alts_ent_end;  /* blk offset to last block */
1904         /* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */
1905 
1906         if (altused == 0 ||
1907             altbase < 1 ||
1908             altbase > altlast ||
1909             altlast >= slcn) {
1910                 goto empty;
1911         }
1912         (void) dadk_iob_free(DKTP_DATA, handle);
1913 
1914         /* read in ALTS remapping table */
1915         handle = dadk_iob_alloc(DKTP_DATA,
1916             slcb + altbase,
1917             (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP);
1918         if (!handle) {
1919                 goto empty;
1920         }
1921 
1922         enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
1923         if (!enttblp) {
1924                 goto empty;
1925         }
1926 
1927         rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
1928 
1929         /* allocate space for dk_slc_cnt and dk_slc_ent tables */
1930         if (dkp->dk_slc_cnt == NULL) {
1931                 dkp->dk_slc_cnt = kmem_alloc(NDKMAP *
1932                     (sizeof (uint32_t) + sizeof (struct alts_ent *)), KM_SLEEP);
1933         }
1934         dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP);
1935 
1936         /* free previous BB table (if any) */
1937         if (dkp->dk_alts_hdl) {
1938                 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
1939                 dkp->dk_alts_hdl = NULL;
1940                 dkp->dk_altused = 0;
1941         }
1942 
1943         /* save linkage to new BB table */
1944         dkp->dk_alts_hdl = handle;
1945         dkp->dk_altused = altused;
1946 
1947         /*
1948          * build indexes to BB table by slice
1949          * effectively we have
1950          *      struct alts_ent *enttblp[altused];
1951          *
1952          *      uint32_t        dk_slc_cnt[NDKMAP];
1953          *      struct alts_ent *dk_slc_ent[NDKMAP];
1954          */
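             /*
              * For illustration (hypothetical numbers): a slice of 500 blocks
              * starting at block 1000 spans [1000, 1499]; dk_slc_ent[i] is
              * pointed at the first candidate remap entry for that slice and
              * dk_slc_cnt[i] counts the entries from there whose bad_start
              * lies at or below block 1499.
              */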
1955         for (i = 0; i < NDKMAP; i++) {
1956                 if (cmlb_partinfo(
1957                     dkp->dk_cmlbhandle,
1958                     i,
1959                     &slcn,
1960                     &slcb,
1961                     NULL,
1962                     NULL,
1963                     0)) {
1964                         goto empty1;
1965                 }
1966 
1967                 dkp->dk_slc_cnt[i] = 0;
1968                 if (slcn == 0)
1969                         continue;       /* slice is not allocated */
1970 
1971                 /* last block in slice */
1972                 slce = slcb + slcn - 1;
1973 
1974                 /* find first remap entry at or after the beginning of the slice */
1975                 for (j = 0; j < altused; j++) {
1976                         if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb)
1977                                 break;
1978                 }
1979                 dkp->dk_slc_ent[i] = enttblp + j;
1980 
1981                 /* count remap entries until the end of the slice */
1982                 for (; j < altused && enttblp[j].bad_start <= slce; j++) {
1983                         dkp->dk_slc_cnt[i] += 1;
1984                 }
1985         }
1986 
1987         rw_exit(&dkp->dk_bbh_mutex);
1988         return;
1989 
1990 empty:
1991         rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
1992 empty1:
1993         if (handle && handle != dkp->dk_alts_hdl)
1994                 (void) dadk_iob_free(DKTP_DATA, handle);
1995 
1996         if (dkp->dk_alts_hdl) {
1997                 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
1998                 dkp->dk_alts_hdl = NULL;
1999         }
2000 
2001         rw_exit(&dkp->dk_bbh_mutex);
2002 }
2003 
2004 /*ARGSUSED*/
2005 static bbh_cookie_t
2006 cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle)
2007 {
2008         struct  bbh_handle *hp;
2009         bbh_cookie_t ckp;
2010 
2011         hp = (struct  bbh_handle *)handle;
2012         ckp = hp->h_cktab + hp->h_idx;
2013         hp->h_idx++;
2014         return (ckp);
2015 }
2016 
2017 /*ARGSUSED*/
2018 static void
2019 cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle)
2020 {
2021         struct  bbh_handle *hp;
2022 
2023         hp = (struct  bbh_handle *)handle;
2024         kmem_free(handle, (sizeof (struct bbh_handle) +
2025             (hp->h_totck * (sizeof (struct bbh_cookie)))));
2026 }
2027 
2028 
2029 /*
2030  *      cmdk_bbh_gethandle remaps the bad sectors to alternates.
2031  *      There are 7 different cases when the comparison is made
2032  *      between the bad sector cluster and the disk section.
2033  *
2034  *      bad sector cluster      gggggggggggbbbbbbbggggggggggg
2035  *      case 1:                    ddddd
2036  *      case 2:                            -d-----
2037  *      case 3:                                      ddddd
2038  *      case 4:                          dddddddddddd
2039  *      case 5:                       ddddddd-----
2040  *      case 6:                            ---ddddddd
2041  *      case 7:                            ddddddd
2042  *
2043  *      where:  g = good sector,        b = bad sector
2044  *              d = sector in disk section
2045  *              - = disk section may be extended to cover those disk areas
2046  */
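
     /*
      * Worked example (hypothetical numbers, CASE 4 above): a request for
      * sectors 100-109 that crosses a bad cluster 104-105 remapped to
      * alternate sectors starting at 5000 is broken into three cookies:
      *
      *     { sector  100, length 4 }     sectors 100-103, still good
      *     { sector 5000, length 2 }     replaces bad sectors 104-105
      *     { sector  106, length 4 }     sectors 106-109, still good
      */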
2047 
2048 static opaque_t
2049 cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp)
2050 {
2051         struct cmdk             *dkp = (struct cmdk *)bbh_data;
2052         struct bbh_handle       *hp;
2053         struct bbh_cookie       *ckp;
2054         struct alts_ent         *altp;
2055         uint32_t                alts_used;
2056         uint32_t                part = CMDKPART(bp->b_edev);
2057         daddr32_t               lastsec;
2058         long                    d_count;
2059         int                     i;
2060         int                     idx;
2061         int                     cnt;
2062 
2063         if (part >= V_NUMPAR)
2064                 return (NULL);
2065 
2066         /*
2067          * This if statement is a single atomic read; it is taken, and we
2068          * return early, in the common case of no bad blocks.
2069          *
2070          * The check is performed outside of the rw_enter for speed and
2071          * is then repeated inside the rw_enter for safety.
2072          */
2073         if (!dkp->dk_alts_hdl) {
2074                 return (NULL);
2075         }
2076 
2077         rw_enter(&dkp->dk_bbh_mutex, RW_READER);
2078 
2079         if (dkp->dk_alts_hdl == NULL) {
2080                 rw_exit(&dkp->dk_bbh_mutex);
2081                 return (NULL);
2082         }
2083 
2084         alts_used = dkp->dk_slc_cnt[part];
2085         if (alts_used == 0) {
2086                 rw_exit(&dkp->dk_bbh_mutex);
2087                 return (NULL);
2088         }
2089         altp = dkp->dk_slc_ent[part];
2090 
2091         /*
2092          * Binary search the alternate entry table for the first entry
2093          * whose bad range overlaps, or lies beyond, the starting sector.
2094          */
2095         i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp));
2096         /* if starting sector is > the largest bad sector, return */
2097         if (i == -1) {
2098                 rw_exit(&dkp->dk_bbh_mutex);
2099                 return (NULL);
2100         }
2101         /* i is the starting index.  Set altp to the starting entry addr */
2102         altp += i;
2103 
2104         d_count = bp->b_bcount >> SCTRSHFT;
2105         lastsec = GET_BP_SEC(bp) + d_count - 1;
2106 
2107         /* calculate the number of bad sectors */
2108         for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) {
2109                 if (lastsec < altp->bad_start)
2110                         break;
2111         }
2112 
2113         if (!cnt) {
2114                 rw_exit(&dkp->dk_bbh_mutex);
2115                 return (NULL);
2116         }
2117 
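             /*
              * Each of the 'cnt' bad clusters that overlap this request maps
              * to one remapped cookie and can split a run of good sectors in
              * two, so 2 * cnt + 1 cookies is the worst case computed below.
              */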
2118         /* calculate the maximum number of reserved cookies */
2119         cnt <<= 1;
2120         cnt++;
2121 
2122         /* allocate the handle */
2123         hp = (struct bbh_handle *)kmem_zalloc((sizeof (*hp) +
2124             (cnt * sizeof (*ckp))), KM_SLEEP);
2125 
2126         hp->h_idx = 0;
2127         hp->h_totck = cnt;
2128         ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1);
2129         ckp[0].ck_sector = GET_BP_SEC(bp);
2130         ckp[0].ck_seclen = d_count;
2131 
2132         altp = dkp->dk_slc_ent[part];
2133         altp += i;
2134         for (idx = 0; i < alts_used; i++, altp++) {
2135                 /* CASE 1: */
2136                 if (lastsec < altp->bad_start)
2137                         break;
2138 
2139                 /* CASE 3: */
2140                 if (ckp[idx].ck_sector > altp->bad_end)
2141                         continue;
2142 
2143                 /* CASE 2 and 7: */
2144                 if ((ckp[idx].ck_sector >= altp->bad_start) &&
2145                     (lastsec <= altp->bad_end)) {
2146                         ckp[idx].ck_sector = altp->good_start +
2147                             ckp[idx].ck_sector - altp->bad_start;
2148                         break;
2149                 }
2150 
2151                 /* at least one bad sector in our section.  split it up. */
2152                 /* CASE 5: */
2153                 if ((lastsec >= altp->bad_start) &&
2154                     (lastsec <= altp->bad_end)) {
2155                         ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1;
2156                         ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen;
2157                         ckp[idx+1].ck_sector = altp->good_start;
2158                         break;
2159                 }
2160                 /* CASE 6: */
2161                 if ((ckp[idx].ck_sector <= altp->bad_end) &&
2162                     (ckp[idx].ck_sector >= altp->bad_start)) {
2163                         ckp[idx+1].ck_seclen = ckp[idx].ck_seclen;
2164                         ckp[idx].ck_seclen = altp->bad_end -
2165                             ckp[idx].ck_sector + 1;
2166                         ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen;
2167                         ckp[idx].ck_sector = altp->good_start +
2168                             ckp[idx].ck_sector - altp->bad_start;
2169                         idx++;
2170                         ckp[idx].ck_sector = altp->bad_end + 1;
2171                         continue;       /* check rest of section */
2172                 }
2173 
2174                 /* CASE 4: */
2175                 ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector;
2176                 ckp[idx+1].ck_sector = altp->good_start;
2177                 ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1;
2178                 idx += 2;
2179                 ckp[idx].ck_sector = altp->bad_end + 1;
2180                 ckp[idx].ck_seclen = lastsec - altp->bad_end;
2181         }
2182 
2183         rw_exit(&dkp->dk_bbh_mutex);
2184         return ((opaque_t)hp);
2185 }
2186 
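     /*
      * cmdk_bbh_bsearch returns the index of the first remap entry whose
      * bad range contains, or lies above, 'key', and -1 when 'key' is past
      * the last bad range.  Worked example (hypothetical table with bad
      * ranges [10-12], [50-55] and [90-95]): key 52 returns 1, key 60
      * returns 2, and key 100 returns -1.
      */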
2187 static int
2188 cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key)
2189 {
2190         int     i;
2191         int     ind;
2192         int     interval;
2193         int     mystatus = -1;
2194 
2195         if (!cnt)
2196                 return (mystatus);
2197 
2198         ind = 1; /* compiler complains about possible uninitialized var */
2199         for (i = 1; i <= cnt; i <<= 1)
2200                 ind = i;
2201 
2202         for (interval = ind; interval; ) {
2203                 if ((key >= buf[ind-1].bad_start) &&
2204                     (key <= buf[ind-1].bad_end)) {
2205                         return (ind-1);
2206                 } else {
2207                         interval >>= 1;
2208                         if (key < buf[ind-1].bad_start) {
2209                                 /* record the largest bad sector index */
2210                                 mystatus = ind-1;
2211                                 if (!interval)
2212                                         break;
2213                                 ind = ind - interval;
2214                         } else {
2215                                 /*
2216                                  * if key is larger than the last element
2217                                  * then break
2218                                  */
2219                                 if ((ind == cnt) || !interval)
2220                                         break;
2221                                 if ((ind+interval) <= cnt)
2222                                         ind += interval;
2223                         }
2224                 }
2225         }
2226         return (mystatus);
2227 }