Print this page
const-ify the segment ops structures
There is no reason to keep the segment ops structures writable.
segop_getpolicy already checks for a NULL op
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
no need for bad-op segment op functions
The segment drivers have a number of bad-op functions that simply panic.
Keeping the function pointer NULL will accomplish the same thing in most
cases.  In other cases, keeping the function pointer NULL will result in
proper error code being returned.
patch lower-case-segops
use C99 initializers in segment ops structures
remove whole-process swapping
Long before Unix supported paging, it used process swapping to reclaim
memory.  The code is there and in theory it runs when we get *extremely* low
on memory.  In practice, it never runs since the definition of low-on-memory
is antiquated. (XXX: define what antiquated means)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout


 411 boot_alloc(void *inaddr, size_t size, uint_t align)
 412 {
 413         caddr_t addr = inaddr;
 414 
 415         if (bootops == NULL)
 416                 prom_panic("boot_alloc: attempt to allocate memory after "
 417                     "BOP_GONE");
 418 
 419         size = ptob(btopr(size));
 420 #ifdef __sparc
 421         if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
 422                 panic("boot_alloc: bop_alloc_chunk failed");
 423 #else
 424         if (BOP_ALLOC(bootops, addr, size, align) != addr)
 425                 panic("boot_alloc: BOP_ALLOC failed");
 426 #endif
 427         boot_mapin((caddr_t)addr, size);
 428         return (addr);
 429 }
 430 
 /*
  * Panic stub used (in this old version) to fill every seg_ops slot that
  * segkmem does not implement.  The SEGKMEM_BADOP() macro casts the stub
  * to whatever function-pointer type a given slot requires; calling a
  * function through an incompatible pointer type is undefined behavior,
  * which is one reason the follow-on patches drop this in favor of NULL.
  */
 431 static void
 432 segkmem_badop()
 433 {
 434         panic("segkmem_badop");
 435 }
 436 
 437 #define SEGKMEM_BADOP(t)        (t(*)())segkmem_badop
 438 
 439 /*ARGSUSED*/
 440 static faultcode_t
 441 segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
 442         enum fault_type type, enum seg_rw rw)
 443 {
 444         pgcnt_t npages;
 445         spgcnt_t pg;
 446         page_t *pp;
 447         struct vnode *vp = seg->s_data;
 448 
 449         ASSERT(RW_READ_HELD(&seg->s_as->a_lock));
 450 
 451         if (seg->s_as != &kas || size > seg->s_size ||
 452             addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
 453                 panic("segkmem_fault: bad args");
 454 
 455         /*
 456          * If it is one of segkp pages, call segkp_fault.
 457          */
 458         if (segkp_bitmap && seg == &kvseg &&
 459             BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 460                 return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));
 461 
 462         if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
 463                 return (FC_NOSUPPORT);
 464 
 465         npages = btopr(size);
 466 
 467         switch (type) {
 468         case F_SOFTLOCK:        /* lock down already-loaded translations */
 469                 for (pg = 0; pg < npages; pg++) {
 470                         pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
 471                             SE_SHARED);
 472                         if (pp == NULL) {
 473                                 /*
 474                                  * Hmm, no page. Does a kernel mapping
 475                                  * exist for it?
 476                                  */
 477                                 if (!hat_probe(kas.a_hat, addr)) {
 478                                         addr -= PAGESIZE;
 479                                         while (--pg >= 0) {
 480                                                 pp = page_find(vp, (u_offset_t)


 502         default:
 503                 return (FC_NOSUPPORT);
 504         }
 505         /*NOTREACHED*/
 506 }
 507 
 /*
  * Change protections on kernel pages in [addr, addr + size).  Pages that
  * belong to segkp (tracked in segkp_bitmap when segkp lives under the
  * heap) are delegated to the segkp driver via SEGOP_SETPROT().  A prot
  * of 0 unloads the translation entirely instead of changing protection.
  * Out-of-range arguments or a non-kas address space panic.
  */
 508 static int
 509 segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
 510 {
 511         ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
 512 
 513         if (seg->s_as != &kas || size > seg->s_size ||
 514             addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
 515                 panic("segkmem_setprot: bad args");
 516 
 517         /*
 518          * If it is one of segkp pages, call segkp.
 519          */
 520         if (segkp_bitmap && seg == &kvseg &&
 521             BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 522                 return (SEGOP_SETPROT(segkp, addr, size, prot));
 523 
 524         if (prot == 0)
 525                 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
 526         else
 527                 hat_chgprot(kas.a_hat, addr, size, prot);
 528         return (0);
 529 }
 530 
 /*
  * checkprot: only the segkp-under-heap case is actually supported; any
  * other caller falls through to segkmem_badop() and panics.
  */
 531 /*
 532  * This is a dummy segkmem function overloaded to call segkp
 533  * when segkp is under the heap.
 534  */
 535 /* ARGSUSED */
 536 static int
 537 segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
 538 {
 539         ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
 540 
 541         if (seg->s_as != &kas)
 542                 segkmem_badop();
 543 
 544         /*
 545          * If it is one of segkp pages, call into segkp.
 546          */
 547         if (segkp_bitmap && seg == &kvseg &&
 548             BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 549                 return (SEGOP_CHECKPROT(segkp, addr, size, prot));
 550 
 551         segkmem_badop();
 552         return (0);
 553 }
 554 
 /*
  * kluster: forwarded to segkp for segkp-under-heap pages only; every
  * other caller panics through segkmem_badop().
  */
 555 /*
 556  * This is a dummy segkmem function overloaded to call segkp
 557  * when segkp is under the heap.
 558  */
 559 /* ARGSUSED */
 560 static int
 561 segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
 562 {
 563         ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
 564 
 565         if (seg->s_as != &kas)
 566                 segkmem_badop();
 567 
 568         /*
 569          * If it is one of segkp pages, call into segkp.
 570          */
 571         if (segkp_bitmap && seg == &kvseg &&
 572             BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 573                 return (SEGOP_KLUSTER(segkp, addr, delta));
 574 
 575         segkmem_badop();
 576         return (0);
 577 }
 578 
 /*
  * Crash-dump helper: walk [start, start + size) one page at a time and
  * add each memory-backed page (valid pfn, pfn <= physmax, and
  * pf_is_memory()) to the dump via dump_addpage().  dump_timeleft is
  * re-armed to dump_timeout on every page -- presumably to keep a
  * dump-progress watchdog from expiring mid-walk (confirm in dumpsubr).
  */
 579 static void
 580 segkmem_xdump_range(void *arg, void *start, size_t size)
 581 {
 582         struct as *as = arg;
 583         caddr_t addr = start;
 584         caddr_t addr_end = addr + size;
 585 
 586         while (addr < addr_end) {
 587                 pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
 588                 if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
 589                         dump_addpage(as, addr, pfn);
 590                 addr += PAGESIZE;
 591                 dump_timeleft = dump_timeout;
 592         }
 593 }
 594 
 595 static void


 674  * will handle the range via as_fault(F_SOFTLOCK).
 675  */
 676 /*ARGSUSED*/
 677 static int
 678 segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
 679         page_t ***ppp, enum lock_type type, enum seg_rw rw)
 680 {
 681         page_t **pplist, *pp;
 682         pgcnt_t npages;
 683         spgcnt_t pg;
 684         size_t nb;
 685         struct vnode *vp = seg->s_data;
 686 
 687         ASSERT(ppp != NULL);
 688 
 689         /*
 690          * If it is one of segkp pages, call into segkp.
 691          */
 692         if (segkp_bitmap && seg == &kvseg &&
 693             BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 694                 return (SEGOP_PAGELOCK(segkp, addr, len, ppp, type, rw));
 695 
 696         npages = btopr(len);
 697         nb = sizeof (page_t *) * npages;
 698 
 699         if (type == L_PAGEUNLOCK) {
 700                 pplist = *ppp;
 701                 ASSERT(pplist != NULL);
 702 
 703                 for (pg = 0; pg < npages; pg++) {
 704                         pp = pplist[pg];
 705                         page_unlock(pp);
 706                 }
 707                 kmem_free(pplist, nb);
 708                 return (0);
 709         }
 710 
 711         ASSERT(type == L_PAGELOCK);
 712 
 713         pplist = kmem_alloc(nb, KM_NOSLEEP);
 714         if (pplist == NULL) {


 727                 }
 728                 pplist[pg] = pp;
 729                 addr += PAGESIZE;
 730         }
 731 
 732         *ppp = pplist;
 733         return (0);
 734 }
 735 
 /*
  * getmemid: forwarded to segkp for segkp-under-heap pages; every other
  * caller panics through segkmem_badop().
  */
 736 /*
 737  * This is a dummy segkmem function overloaded to call segkp
 738  * when segkp is under the heap.
 739  */
 740 /* ARGSUSED */
 741 static int
 742 segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
 743 {
 744         ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
 745 
 746         if (seg->s_as != &kas)
 747                 segkmem_badop();
 748 
 749         /*
 750          * If it is one of segkp pages, call into segkp.
 751          */
 752         if (segkp_bitmap && seg == &kvseg &&
 753             BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 754                 return (SEGOP_GETMEMID(segkp, addr, memidp));
 755 
 756         segkmem_badop();
 757         return (0);
 758 }
 759 
 /*
  * segkmem carries no lgroup memory-allocation policy; always NULL.
  * (The follow-on patch removes this stub entirely, since
  * segop_getpolicy already handles a NULL op.)
  */
 760 /*ARGSUSED*/
 761 static lgrp_mem_policy_info_t *
 762 segkmem_getpolicy(struct seg *seg, caddr_t addr)
 763 {
 764         return (NULL);
 765 }
 766 
 /*
  * Capability query: segkmem advertises only S_CAPABILITY_NOMINFLT
  * (presumably "no minor faults" -- confirm against seg.h); every other
  * capability is refused.
  */
 767 /*ARGSUSED*/
 768 static int
 769 segkmem_capable(struct seg *seg, segcapability_t capability)
 770 {
 771         if (capability == S_CAPABILITY_NOMINFLT)
 772                 return (1);
 773         return (0);
 774 }
 775 
 /*
  * Old segment-operations vector: positional (pre-C99) initialization,
  * with every unimplemented op filled by a SEGKMEM_BADOP() panic stub
  * rather than left NULL.  Order must match struct seg_ops exactly,
  * hence the trailing slot-name comments.
  */
 776 static struct seg_ops segkmem_ops = {
 777         SEGKMEM_BADOP(int),             /* dup */
 778         SEGKMEM_BADOP(int),             /* unmap */
 779         SEGKMEM_BADOP(void),            /* free */
 780         segkmem_fault,
 781         SEGKMEM_BADOP(faultcode_t),     /* faulta */
 782         segkmem_setprot,
 783         segkmem_checkprot,
 784         segkmem_kluster,
 785         SEGKMEM_BADOP(size_t),          /* swapout */
 786         SEGKMEM_BADOP(int),             /* sync */
 787         SEGKMEM_BADOP(size_t),          /* incore */
 788         SEGKMEM_BADOP(int),             /* lockop */
 789         SEGKMEM_BADOP(int),             /* getprot */
 790         SEGKMEM_BADOP(u_offset_t),      /* getoffset */
 791         SEGKMEM_BADOP(int),             /* gettype */
 792         SEGKMEM_BADOP(int),             /* getvp */
 793         SEGKMEM_BADOP(int),             /* advise */
 794         segkmem_dump,
 795         segkmem_pagelock,
 796         SEGKMEM_BADOP(int),             /* setpgsz */
 797         segkmem_getmemid,
 798         segkmem_getpolicy,              /* getpolicy */
 799         segkmem_capable,                /* capable */
 800         seg_inherit_notsup              /* inherit */
 801 };
 802 
 /*
  * Initialize a kernel segment backed by the ZIO vnode (zvp): install the
  * segkmem ops vector and grow the kernel address-space size.  Caller
  * must hold kas.a_lock as writer (asserted).
  */
 803 int
 804 segkmem_zio_create(struct seg *seg)
 805 {
 806         ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
 807         seg->s_ops = &segkmem_ops;
 808         seg->s_data = &zvp;
 809         kas.a_size += seg->s_size;
 810         return (0);
 811 }
 812 
 813 int
 814 segkmem_create(struct seg *seg)
 815 {
 816         ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
 817         seg->s_ops = &segkmem_ops;
 818         seg->s_data = &kvp;
 819         kas.a_size += seg->s_size;
 820         return (0);




 411 boot_alloc(void *inaddr, size_t size, uint_t align)
 412 {
 413         caddr_t addr = inaddr;
 414 
 415         if (bootops == NULL)
 416                 prom_panic("boot_alloc: attempt to allocate memory after "
 417                     "BOP_GONE");
 418 
 419         size = ptob(btopr(size));
 420 #ifdef __sparc
 421         if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
 422                 panic("boot_alloc: bop_alloc_chunk failed");
 423 #else
 424         if (BOP_ALLOC(bootops, addr, size, align) != addr)
 425                 panic("boot_alloc: BOP_ALLOC failed");
 426 #endif
 427         boot_mapin((caddr_t)addr, size);
 428         return (addr);
 429 }
 430 








 431 /*ARGSUSED*/
 432 static faultcode_t
 433 segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
 434         enum fault_type type, enum seg_rw rw)
 435 {
 436         pgcnt_t npages;
 437         spgcnt_t pg;
 438         page_t *pp;
 439         struct vnode *vp = seg->s_data;
 440 
 441         ASSERT(RW_READ_HELD(&seg->s_as->a_lock));
 442 
 443         if (seg->s_as != &kas || size > seg->s_size ||
 444             addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
 445                 panic("segkmem_fault: bad args");
 446 
 447         /*
 448          * If it is one of segkp pages, call segkp_fault.
 449          */
 450         if (segkp_bitmap && seg == &kvseg &&
 451             BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 452                 return (segop_fault(hat, segkp, addr, size, type, rw));
 453 
 454         if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
 455                 return (FC_NOSUPPORT);
 456 
 457         npages = btopr(size);
 458 
 459         switch (type) {
 460         case F_SOFTLOCK:        /* lock down already-loaded translations */
 461                 for (pg = 0; pg < npages; pg++) {
 462                         pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
 463                             SE_SHARED);
 464                         if (pp == NULL) {
 465                                 /*
 466                                  * Hmm, no page. Does a kernel mapping
 467                                  * exist for it?
 468                                  */
 469                                 if (!hat_probe(kas.a_hat, addr)) {
 470                                         addr -= PAGESIZE;
 471                                         while (--pg >= 0) {
 472                                                 pp = page_find(vp, (u_offset_t)


 494         default:
 495                 return (FC_NOSUPPORT);
 496         }
 497         /*NOTREACHED*/
 498 }
 499 
 /*
  * Change protections on kernel pages in [addr, addr + size).  segkp
  * pages (per segkp_bitmap, when segkp lives under the heap) are
  * delegated via the new lower-case segop_setprot() dispatch.  prot == 0
  * unloads the translation entirely; bad arguments panic.
  */
 500 static int
 501 segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
 502 {
 503         ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
 504 
 505         if (seg->s_as != &kas || size > seg->s_size ||
 506             addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
 507                 panic("segkmem_setprot: bad args");
 508 
 509         /*
 510          * If it is one of segkp pages, call segkp.
 511          */
 512         if (segkp_bitmap && seg == &kvseg &&
 513             BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 514                 return (segop_setprot(segkp, addr, size, prot));
 515 
 516         if (prot == 0)
 517                 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
 518         else
 519                 hat_chgprot(kas.a_hat, addr, size, prot);
 520         return (0);
 521 }
 522 
 /*
  * checkprot (new version): the segkmem_badop() helper is gone, so
  * unsupported calls panic inline; only the segkp-under-heap case is
  * forwarded, via the lower-case segop_checkprot() dispatch.
  */
 523 /*
 524  * This is a dummy segkmem function overloaded to call segkp
 525  * when segkp is under the heap.
 526  */
 527 /* ARGSUSED */
 528 static int
 529 segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
 530 {
 531         ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
 532 
 533         if (seg->s_as != &kas)
 534                 panic("segkmem badop");
 535 
 536         /*
 537          * If it is one of segkp pages, call into segkp.
 538          */
 539         if (segkp_bitmap && seg == &kvseg &&
 540             BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 541                 return (segop_checkprot(segkp, addr, size, prot));
 542 
 543         panic("segkmem badop");
 544         return (0);
 545 }
 546 
 /*
  * kluster (new version): forwarded to segkp via segop_kluster() for
  * segkp-under-heap pages; anything else panics inline now that the
  * segkmem_badop() helper has been removed.
  */
 547 /*
 548  * This is a dummy segkmem function overloaded to call segkp
 549  * when segkp is under the heap.
 550  */
 551 /* ARGSUSED */
 552 static int
 553 segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
 554 {
 555         ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
 556 
 557         if (seg->s_as != &kas)
 558                 panic("segkmem badop");
 559 
 560         /*
 561          * If it is one of segkp pages, call into segkp.
 562          */
 563         if (segkp_bitmap && seg == &kvseg &&
 564             BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 565                 return (segop_kluster(segkp, addr, delta));
 566 
 567         panic("segkmem badop");
 568         return (0);
 569 }
 570 
 /*
  * Crash-dump helper (unchanged by this patch series): walk
  * [start, start + size) a page at a time and add each memory-backed page
  * (valid pfn, pfn <= physmax, pf_is_memory()) to the dump via
  * dump_addpage().  dump_timeleft is re-armed each page, presumably to
  * keep a dump-progress watchdog from expiring -- confirm in dumpsubr.
  */
 571 static void
 572 segkmem_xdump_range(void *arg, void *start, size_t size)
 573 {
 574         struct as *as = arg;
 575         caddr_t addr = start;
 576         caddr_t addr_end = addr + size;
 577 
 578         while (addr < addr_end) {
 579                 pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
 580                 if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
 581                         dump_addpage(as, addr, pfn);
 582                 addr += PAGESIZE;
 583                 dump_timeleft = dump_timeout;
 584         }
 585 }
 586 
 587 static void


 666  * will handle the range via as_fault(F_SOFTLOCK).
 667  */
 668 /*ARGSUSED*/
 669 static int
 670 segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
 671         page_t ***ppp, enum lock_type type, enum seg_rw rw)
 672 {
 673         page_t **pplist, *pp;
 674         pgcnt_t npages;
 675         spgcnt_t pg;
 676         size_t nb;
 677         struct vnode *vp = seg->s_data;
 678 
 679         ASSERT(ppp != NULL);
 680 
 681         /*
 682          * If it is one of segkp pages, call into segkp.
 683          */
 684         if (segkp_bitmap && seg == &kvseg &&
 685             BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 686                 return (segop_pagelock(segkp, addr, len, ppp, type, rw));
 687 
 688         npages = btopr(len);
 689         nb = sizeof (page_t *) * npages;
 690 
 691         if (type == L_PAGEUNLOCK) {
 692                 pplist = *ppp;
 693                 ASSERT(pplist != NULL);
 694 
 695                 for (pg = 0; pg < npages; pg++) {
 696                         pp = pplist[pg];
 697                         page_unlock(pp);
 698                 }
 699                 kmem_free(pplist, nb);
 700                 return (0);
 701         }
 702 
 703         ASSERT(type == L_PAGELOCK);
 704 
 705         pplist = kmem_alloc(nb, KM_NOSLEEP);
 706         if (pplist == NULL) {


 719                 }
 720                 pplist[pg] = pp;
 721                 addr += PAGESIZE;
 722         }
 723 
 724         *ppp = pplist;
 725         return (0);
 726 }
 727 
 /*
  * getmemid (new version): forwarded to segkp via segop_getmemid() for
  * segkp-under-heap pages; anything else panics inline.
  */
 728 /*
 729  * This is a dummy segkmem function overloaded to call segkp
 730  * when segkp is under the heap.
 731  */
 732 /* ARGSUSED */
 733 static int
 734 segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
 735 {
 736         ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
 737 
 738         if (seg->s_as != &kas)
 739                 panic("segkmem badop");
 740 
 741         /*
 742          * If it is one of segkp pages, call into segkp.
 743          */
 744         if (segkp_bitmap && seg == &kvseg &&
 745             BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 746                 return (segop_getmemid(segkp, addr, memidp));
 747 
 748         panic("segkmem badop");
 749         return (0);
 750 }
 751 
 752 /*ARGSUSED*/







 /*
  * Capability query: segkmem advertises only S_CAPABILITY_NOMINFLT
  * (presumably "no minor faults" -- confirm against seg.h); every other
  * capability is refused.
  */
 753 static int
 754 segkmem_capable(struct seg *seg, segcapability_t capability)
 755 {
 756         if (capability == S_CAPABILITY_NOMINFLT)
 757                 return (1);
 758         return (0);
 759 }
 760 
 /*
  * New segment-operations vector: const (read-only after link) and using
  * C99 designated initializers, so slot order no longer matters and the
  * unimplemented ops are simply left NULL instead of pointing at panic
  * stubs.  Note .getpolicy and .inherit are omitted; per the commit notes
  * in this file, the segop_getpolicy/segop_inherit dispatchers check for
  * NULL themselves.
  */
 761 static const struct seg_ops segkmem_ops = {
 762         .fault          = segkmem_fault,
 763         .setprot        = segkmem_setprot,
 764         .checkprot      = segkmem_checkprot,
 765         .kluster        = segkmem_kluster,
 766         .dump           = segkmem_dump,
 767         .pagelock       = segkmem_pagelock,
 768         .getmemid       = segkmem_getmemid,
 769         .capable        = segkmem_capable,
















 770 };
 771 
 /*
  * Initialize a kernel segment backed by the ZIO vnode (zvp): install the
  * segkmem ops vector and grow the kernel address-space size.  Caller
  * must hold kas.a_lock as writer (asserted).
  * NOTE(review): segkmem_ops is now const, so this assignment assumes
  * seg->s_ops was const-ified in seg.h as part of the same change --
  * verify, or this will not compile cleanly.
  */
 772 int
 773 segkmem_zio_create(struct seg *seg)
 774 {
 775         ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
 776         seg->s_ops = &segkmem_ops;
 777         seg->s_data = &zvp;
 778         kas.a_size += seg->s_size;
 779         return (0);
 780 }
 781 
 782 int
 783 segkmem_create(struct seg *seg)
 784 {
 785         ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
 786         seg->s_ops = &segkmem_ops;
 787         seg->s_data = &kvp;
 788         kas.a_size += seg->s_size;
 789         return (0);