758
759 switch (cmd) {
760 case DDI_DETACH:
761 break;
762
763 case DDI_SUSPEND:
764 if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) {
765 return (DDI_FAILURE);
766 }
767
768 mutex_enter(&pp->umutex);
769 ASSERT(pp->suspended == FALSE);
770
771 pp->suspended = TRUE; /* prevent new transfers */
772
773 /*
774 * Wait if there's any activity on the port
775 */
776 if ((pp->e_busy == ECPP_BUSY) || (pp->e_busy == ECPP_FLUSH)) {
777 (void) cv_reltimedwait(&pp->pport_cv, &pp->umutex,
778 SUSPEND_TOUT * drv_usectohz(1000000),
779 TR_CLOCK_TICK);
780 if ((pp->e_busy == ECPP_BUSY) ||
781 (pp->e_busy == ECPP_FLUSH)) {
782 pp->suspended = FALSE;
783 mutex_exit(&pp->umutex);
784 ecpp_error(pp->dip,
785 "ecpp_detach: suspend timeout\n");
786 return (DDI_FAILURE);
787 }
788 }
789
790 mutex_exit(&pp->umutex);
791 return (DDI_SUCCESS);
792
793 default:
794 return (DDI_FAILURE);
795 }
796
797 pp = ddi_get_soft_state(ecppsoft_statep, instance);
798 #if defined(__x86)
2787 if (pp->current_phase == ECPP_PHASE_ECP_REV_IDLE) {
2788 if (ecp_reverse2forward(pp) == FAILURE) {
2789 if (pp->msg) {
2790 (void) putbq(pp->writeq, pp->msg);
2791 } else {
2792 ecpp_putback_untransfered(pp,
2793 addr, len);
2794 }
2795 }
2796 }
2797
2798 if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
2799 return;
2800 }
2801
2802 break;
2803 }
2804
2805 /* schedule transfer timeout */
2806 pp->timeout_id = timeout(ecpp_xfer_timeout, (caddr_t)pp,
2807 pp->xfer_parms.write_timeout * drv_usectohz(1000000));
2808 }
2809
2810 /*
2811 * Transfer a PIO "block" a byte at a time.
2812 * The block starts at addr and ends at pp->last_byte
2813 */
2814 static uint8_t
2815 ecpp_prep_pio_xfer(struct ecppunit *pp, caddr_t addr, size_t len)
2816 {
2817 pp->next_byte = addr;
2818 pp->last_byte = (caddr_t)((ulong_t)addr + len);
2819
2820 if (ecpp_check_status(pp) == FAILURE) {
2821 /*
2822 * if status signals are bad, do not start PIO,
2823 * put everything back on the queue.
2824 */
2825 ecpp_error(pp->dip,
2826 "ecpp_prep_pio_xfer:suspend PIO len=%d\n", len);
2827
|
758
759 switch (cmd) {
760 case DDI_DETACH:
761 break;
762
763 case DDI_SUSPEND:
764 if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) {
765 return (DDI_FAILURE);
766 }
767
768 mutex_enter(&pp->umutex);
769 ASSERT(pp->suspended == FALSE);
770
771 pp->suspended = TRUE; /* prevent new transfers */
772
773 /*
774 * Wait if there's any activity on the port
775 */
776 if ((pp->e_busy == ECPP_BUSY) || (pp->e_busy == ECPP_FLUSH)) {
777 (void) cv_reltimedwait(&pp->pport_cv, &pp->umutex,
778 drv_sectohz(SUSPEND_TOUT),
779 TR_CLOCK_TICK);
780 if ((pp->e_busy == ECPP_BUSY) ||
781 (pp->e_busy == ECPP_FLUSH)) {
782 pp->suspended = FALSE;
783 mutex_exit(&pp->umutex);
784 ecpp_error(pp->dip,
785 "ecpp_detach: suspend timeout\n");
786 return (DDI_FAILURE);
787 }
788 }
789
790 mutex_exit(&pp->umutex);
791 return (DDI_SUCCESS);
792
793 default:
794 return (DDI_FAILURE);
795 }
796
797 pp = ddi_get_soft_state(ecppsoft_statep, instance);
798 #if defined(__x86)
2787 if (pp->current_phase == ECPP_PHASE_ECP_REV_IDLE) {
2788 if (ecp_reverse2forward(pp) == FAILURE) {
2789 if (pp->msg) {
2790 (void) putbq(pp->writeq, pp->msg);
2791 } else {
2792 ecpp_putback_untransfered(pp,
2793 addr, len);
2794 }
2795 }
2796 }
2797
2798 if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
2799 return;
2800 }
2801
2802 break;
2803 }
2804
2805 /* schedule transfer timeout */
2806 pp->timeout_id = timeout(ecpp_xfer_timeout, (caddr_t)pp,
2807 drv_sectohz(pp->xfer_parms.write_timeout));
2808 }
2809
2810 /*
2811 * Transfer a PIO "block" a byte at a time.
2812 * The block starts at addr and ends at pp->last_byte
2813 */
2814 static uint8_t
2815 ecpp_prep_pio_xfer(struct ecppunit *pp, caddr_t addr, size_t len)
2816 {
2817 pp->next_byte = addr;
2818 pp->last_byte = (caddr_t)((ulong_t)addr + len);
2819
2820 if (ecpp_check_status(pp) == FAILURE) {
2821 /*
2822 * if status signals are bad, do not start PIO,
2823 * put everything back on the queue.
2824 */
2825 ecpp_error(pp->dip,
2826 "ecpp_prep_pio_xfer:suspend PIO len=%d\n", len);
2827
|