6146 seg_inherit_notsup is redundant

The segmap driver's ops vector currently sets the inherit entry explicitly:

 107         .fault          = segmap_fault,
 108         .faulta         = segmap_faulta,
 109         .setprot        = SEGMAP_BADOP(int),
 110         .checkprot      = segmap_checkprot,
 111         .kluster        = segmap_kluster,
 112         .swapout        = SEGMAP_BADOP(size_t),
 113         .sync           = SEGMAP_BADOP(int),
 114         .incore         = SEGMAP_BADOP(size_t),
 115         .lockop         = SEGMAP_BADOP(int),
 116         .getprot        = segmap_getprot,
 117         .getoffset      = segmap_getoffset,
 118         .gettype        = segmap_gettype,
 119         .getvp          = segmap_getvp,
 120         .advise         = SEGMAP_BADOP(int),
 121         .dump           = segmap_dump,
 122         .pagelock       = segmap_pagelock,
 123         .setpagesize    = SEGMAP_BADOP(int),
 124         .getmemid       = segmap_getmemid,
 125         .getpolicy      = segmap_getpolicy,
 126         .capable        = segmap_capable,
 127         .inherit        = seg_inherit_notsup,
 128 };
 129 
 130 /*
 131  * Private segmap routines.
 132  */
 133 static void     segmap_unlock(struct hat *hat, struct seg *seg, caddr_t addr,
 134                         size_t len, enum seg_rw rw, struct smap *smp);
 135 static void     segmap_smapadd(struct smap *smp);
 136 static struct smap *segmap_hashin(struct smap *smp, struct vnode *vp,
 137                         u_offset_t off, int hashid);
 138 static void     segmap_hashout(struct smap *smp);
 139 
 140 
 141 /*
 142  * Statistics for segmap operations.
 143  *
 144  * No explicit locking to protect these stats.
 145  */
 146 struct segmapcnt segmapcnt = {
 147         { "fault",              KSTAT_DATA_ULONG },

The same ops vector after the redundant .inherit assignment is dropped (the closing brace and everything after it shift up by one line):

 107         .fault          = segmap_fault,
 108         .faulta         = segmap_faulta,
 109         .setprot        = SEGMAP_BADOP(int),
 110         .checkprot      = segmap_checkprot,
 111         .kluster        = segmap_kluster,
 112         .swapout        = SEGMAP_BADOP(size_t),
 113         .sync           = SEGMAP_BADOP(int),
 114         .incore         = SEGMAP_BADOP(size_t),
 115         .lockop         = SEGMAP_BADOP(int),
 116         .getprot        = segmap_getprot,
 117         .getoffset      = segmap_getoffset,
 118         .gettype        = segmap_gettype,
 119         .getvp          = segmap_getvp,
 120         .advise         = SEGMAP_BADOP(int),
 121         .dump           = segmap_dump,
 122         .pagelock       = segmap_pagelock,
 123         .setpagesize    = SEGMAP_BADOP(int),
 124         .getmemid       = segmap_getmemid,
 125         .getpolicy      = segmap_getpolicy,
 126         .capable        = segmap_capable,

 127 };
 128 
 129 /*
 130  * Private segmap routines.
 131  */
 132 static void     segmap_unlock(struct hat *hat, struct seg *seg, caddr_t addr,
 133                         size_t len, enum seg_rw rw, struct smap *smp);
 134 static void     segmap_smapadd(struct smap *smp);
 135 static struct smap *segmap_hashin(struct smap *smp, struct vnode *vp,
 136                         u_offset_t off, int hashid);
 137 static void     segmap_hashout(struct smap *smp);
 138 
 139 
 140 /*
 141  * Statistics for segmap operations.
 142  *
 143  * No explicit locking to protect these stats.
 144  */
 145 struct segmapcnt segmapcnt = {
 146         { "fault",              KSTAT_DATA_ULONG },