boot_alloc(void *inaddr, size_t size, uint_t align)
{
	caddr_t addr = inaddr;

	/* Boot-time allocation is only legal while bootops is still live. */
	if (bootops == NULL)
		prom_panic("boot_alloc: attempt to allocate memory after "
		    "BOP_GONE");

	/* Round the request up to a whole number of pages. */
	size = ptob(btopr(size));
#ifdef __sparc
	if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
		panic("boot_alloc: bop_alloc_chunk failed");
#else
	/* The boot allocator must satisfy the request at exactly addr. */
	if (BOP_ALLOC(bootops, addr, size, align) != addr)
		panic("boot_alloc: BOP_ALLOC failed");
#endif
	boot_mapin((caddr_t)addr, size);
	return (addr);
}
430
/*
 * Trap routine for segment operations that are never legal on a
 * segkmem segment; any caller reaching one of these is a bug.
 */
static void
segkmem_badop()
{
	panic("segkmem_badop");
}

/* Wire an ops-vector slot of return type t to segkmem_badop(). */
#define SEGKMEM_BADOP(t) (t(*)())segkmem_badop
438
439 /*ARGSUSED*/
440 static faultcode_t
441 segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
442 enum fault_type type, enum seg_rw rw)
443 {
444 pgcnt_t npages;
445 spgcnt_t pg;
446 page_t *pp;
447 struct vnode *vp = seg->s_data;
448
449 ASSERT(RW_READ_HELD(&seg->s_as->a_lock));
450
451 if (seg->s_as != &kas || size > seg->s_size ||
452 addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
453 panic("segkmem_fault: bad args");
454
455 /*
456 * If it is one of segkp pages, call segkp_fault.
457 */
458 if (segkp_bitmap && seg == &kvseg &&
522 return (segop_setprot(segkp, addr, size, prot));
523
524 if (prot == 0)
525 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
526 else
527 hat_chgprot(kas.a_hat, addr, size, prot);
528 return (0);
529 }
530
531 /*
532 * This is a dummy segkmem function overloaded to call segkp
533 * when segkp is under the heap.
534 */
535 /* ARGSUSED */
536 static int
537 segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
538 {
539 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
540
541 if (seg->s_as != &kas)
542 segkmem_badop();
543
544 /*
545 * If it is one of segkp pages, call into segkp.
546 */
547 if (segkp_bitmap && seg == &kvseg &&
548 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
549 return (segop_checkprot(segkp, addr, size, prot));
550
551 segkmem_badop();
552 return (0);
553 }
554
555 /*
556 * This is a dummy segkmem function overloaded to call segkp
557 * when segkp is under the heap.
558 */
559 /* ARGSUSED */
560 static int
561 segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
562 {
563 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
564
565 if (seg->s_as != &kas)
566 segkmem_badop();
567
568 /*
569 * If it is one of segkp pages, call into segkp.
570 */
571 if (segkp_bitmap && seg == &kvseg &&
572 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
573 return (segop_kluster(segkp, addr, delta));
574
575 segkmem_badop();
576 return (0);
577 }
578
579 static void
580 segkmem_xdump_range(void *arg, void *start, size_t size)
581 {
582 struct as *as = arg;
583 caddr_t addr = start;
584 caddr_t addr_end = addr + size;
585
586 while (addr < addr_end) {
587 pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
588 if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
589 dump_addpage(as, addr, pfn);
590 addr += PAGESIZE;
591 dump_timeleft = dump_timeout;
592 }
593 }
594
595 static void
727 }
728 pplist[pg] = pp;
729 addr += PAGESIZE;
730 }
731
732 *ppp = pplist;
733 return (0);
734 }
735
736 /*
737 * This is a dummy segkmem function overloaded to call segkp
738 * when segkp is under the heap.
739 */
740 /* ARGSUSED */
741 static int
742 segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
743 {
744 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
745
746 if (seg->s_as != &kas)
747 segkmem_badop();
748
749 /*
750 * If it is one of segkp pages, call into segkp.
751 */
752 if (segkp_bitmap && seg == &kvseg &&
753 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
754 return (segop_getmemid(segkp, addr, memidp));
755
756 segkmem_badop();
757 return (0);
758 }
759
760 /*ARGSUSED*/
761 static lgrp_mem_policy_info_t *
762 segkmem_getpolicy(struct seg *seg, caddr_t addr)
763 {
764 return (NULL);
765 }
766
767 /*ARGSUSED*/
768 static int
769 segkmem_capable(struct seg *seg, segcapability_t capability)
770 {
771 if (capability == S_CAPABILITY_NOMINFLT)
772 return (1);
773 return (0);
774 }
775
/*
 * segkmem segment driver ops vector.  Operations that are never
 * legal on kernel memory are wired to segkmem_badop(), which panics
 * if ever reached.
 */
static struct seg_ops segkmem_ops = {
	.dup = SEGKMEM_BADOP(int),
	.unmap = SEGKMEM_BADOP(int),
	.free = SEGKMEM_BADOP(void),
	.fault = segkmem_fault,
	.faulta = SEGKMEM_BADOP(faultcode_t),
	.setprot = segkmem_setprot,
	.checkprot = segkmem_checkprot,
	.kluster = segkmem_kluster,
	.sync = SEGKMEM_BADOP(int),
	.incore = SEGKMEM_BADOP(size_t),
	.lockop = SEGKMEM_BADOP(int),
	.getprot = SEGKMEM_BADOP(int),
	.getoffset = SEGKMEM_BADOP(u_offset_t),
	.gettype = SEGKMEM_BADOP(int),
	.getvp = SEGKMEM_BADOP(int),
	.advise = SEGKMEM_BADOP(int),
	.dump = segkmem_dump,
	.pagelock = segkmem_pagelock,
	.setpagesize = SEGKMEM_BADOP(int),
	.getmemid = segkmem_getmemid,
	.getpolicy = segkmem_getpolicy,
	.capable = segkmem_capable,
	.inherit = seg_inherit_notsup,
};
801
802 int
803 segkmem_zio_create(struct seg *seg)
804 {
805 ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
806 seg->s_ops = &segkmem_ops;
807 seg->s_data = &zvp;
808 kas.a_size += seg->s_size;
809 return (0);
810 }
811
812 int
813 segkmem_create(struct seg *seg)
814 {
815 ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
|
boot_alloc(void *inaddr, size_t size, uint_t align)
{
	caddr_t addr = inaddr;

	/* Boot-time allocation is only legal while bootops is still live. */
	if (bootops == NULL)
		prom_panic("boot_alloc: attempt to allocate memory after "
		    "BOP_GONE");

	/* Round the request up to a whole number of pages. */
	size = ptob(btopr(size));
#ifdef __sparc
	if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
		panic("boot_alloc: bop_alloc_chunk failed");
#else
	/* The boot allocator must satisfy the request at exactly addr. */
	if (BOP_ALLOC(bootops, addr, size, align) != addr)
		panic("boot_alloc: BOP_ALLOC failed");
#endif
	boot_mapin((caddr_t)addr, size);
	return (addr);
}
430
431 /*ARGSUSED*/
432 static faultcode_t
433 segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
434 enum fault_type type, enum seg_rw rw)
435 {
436 pgcnt_t npages;
437 spgcnt_t pg;
438 page_t *pp;
439 struct vnode *vp = seg->s_data;
440
441 ASSERT(RW_READ_HELD(&seg->s_as->a_lock));
442
443 if (seg->s_as != &kas || size > seg->s_size ||
444 addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
445 panic("segkmem_fault: bad args");
446
447 /*
448 * If it is one of segkp pages, call segkp_fault.
449 */
450 if (segkp_bitmap && seg == &kvseg &&
514 return (segop_setprot(segkp, addr, size, prot));
515
516 if (prot == 0)
517 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
518 else
519 hat_chgprot(kas.a_hat, addr, size, prot);
520 return (0);
521 }
522
523 /*
524 * This is a dummy segkmem function overloaded to call segkp
525 * when segkp is under the heap.
526 */
527 /* ARGSUSED */
528 static int
529 segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
530 {
531 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
532
533 if (seg->s_as != &kas)
534 panic("segkmem badop");
535
536 /*
537 * If it is one of segkp pages, call into segkp.
538 */
539 if (segkp_bitmap && seg == &kvseg &&
540 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
541 return (segop_checkprot(segkp, addr, size, prot));
542
543 panic("segkmem badop");
544 return (0);
545 }
546
547 /*
548 * This is a dummy segkmem function overloaded to call segkp
549 * when segkp is under the heap.
550 */
551 /* ARGSUSED */
552 static int
553 segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
554 {
555 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
556
557 if (seg->s_as != &kas)
558 panic("segkmem badop");
559
560 /*
561 * If it is one of segkp pages, call into segkp.
562 */
563 if (segkp_bitmap && seg == &kvseg &&
564 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
565 return (segop_kluster(segkp, addr, delta));
566
567 panic("segkmem badop");
568 return (0);
569 }
570
571 static void
572 segkmem_xdump_range(void *arg, void *start, size_t size)
573 {
574 struct as *as = arg;
575 caddr_t addr = start;
576 caddr_t addr_end = addr + size;
577
578 while (addr < addr_end) {
579 pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
580 if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
581 dump_addpage(as, addr, pfn);
582 addr += PAGESIZE;
583 dump_timeleft = dump_timeout;
584 }
585 }
586
587 static void
719 }
720 pplist[pg] = pp;
721 addr += PAGESIZE;
722 }
723
724 *ppp = pplist;
725 return (0);
726 }
727
728 /*
729 * This is a dummy segkmem function overloaded to call segkp
730 * when segkp is under the heap.
731 */
732 /* ARGSUSED */
733 static int
734 segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
735 {
736 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
737
738 if (seg->s_as != &kas)
739 panic("segkmem badop");
740
741 /*
742 * If it is one of segkp pages, call into segkp.
743 */
744 if (segkp_bitmap && seg == &kvseg &&
745 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
746 return (segop_getmemid(segkp, addr, memidp));
747
748 panic("segkmem badop");
749 return (0);
750 }
751
752 /*ARGSUSED*/
753 static lgrp_mem_policy_info_t *
754 segkmem_getpolicy(struct seg *seg, caddr_t addr)
755 {
756 return (NULL);
757 }
758
759 /*ARGSUSED*/
760 static int
761 segkmem_capable(struct seg *seg, segcapability_t capability)
762 {
763 if (capability == S_CAPABILITY_NOMINFLT)
764 return (1);
765 return (0);
766 }
767
/*
 * segkmem segment driver ops vector.  Only the operations meaningful
 * for kernel memory are supplied; members not listed here are left
 * NULL by the designated initializer.
 */
static struct seg_ops segkmem_ops = {
	.fault = segkmem_fault,
	.setprot = segkmem_setprot,
	.checkprot = segkmem_checkprot,
	.kluster = segkmem_kluster,
	.dump = segkmem_dump,
	.pagelock = segkmem_pagelock,
	.getmemid = segkmem_getmemid,
	.getpolicy = segkmem_getpolicy,
	.capable = segkmem_capable,
	.inherit = seg_inherit_notsup,
};
780
781 int
782 segkmem_zio_create(struct seg *seg)
783 {
784 ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
785 seg->s_ops = &segkmem_ops;
786 seg->s_data = &zvp;
787 kas.a_size += seg->s_size;
788 return (0);
789 }
790
791 int
792 segkmem_create(struct seg *seg)
793 {
794 ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
|