cpuid for ARMv7
--- old/usr/src/uts/armv7/vm/vm_dep.h
+++ new/usr/src/uts/armv7/vm/vm_dep.h
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright (c) 2014 Joyent, Inc. All rights reserved.
14 14 */
15 15
16 16 #ifndef _VM_DEP_H
17 17 #define _VM_DEP_H
18 18
19 19 /*
20 20 * UNIX machine dependent virtual memory support for ARMv7.
21 21 */
22 22
23 23 #ifdef __cplusplus
24 24 extern "C" {
25 25 #endif
26 26
27 27 #include <sys/param.h>
28 28 #include <sys/memnode.h>
29 29
30 30 /*
31 31 * Do not use GETTICK. It is only meant to be used when timesource
32 32 * synchronization is unimportant.
33 33 */
34 34 #define GETTICK() gethrtime_unscaled()
35 35
36 36 /* tick value that should be used for random values */
37 37 extern u_longlong_t randtick(void);
38 38
39 39 #define PLCNT_SZ(ctrs_sz) panic("plcnt_sz")
40 40
41 41 #define PLCNT_INIT(addr) panic("plcnt_init")
42 42
43 43 #define PLCNT_INCR(pp, mnode, mtype, szc, flags) panic("plcnt_incr")
44 44 #define PLCNT_DECR(pp, mnode, mtype, szc, flags) panic("plcnt_decr")
45 45
46 46 /*
47 47  * Macro to update the page list max counts. This is a no-op on x86 but not
48 48  * on SPARC. We panic for now on ARM; it appears to be used mainly for kcage.
49 49 */
50 50 #define PLCNT_XFER_NORELOC(pp) panic("plcnt_xfer_noreloc")
51 51
52 52 /*
53 53 * Macro to modify the page list max counts when memory is added to
54 54 * the page lists during startup (add_physmem) or during a DR operation
55 55 * when memory is added (kphysm_add_memory_dynamic) or deleted
56 56 * (kphysm_del_cleanup).
57 57 */
58 58 extern void plcnt_modify_max(pfn_t, long);
59 59 #define PLCNT_MODIFY_MAX(pfn, cnt) plcnt_modify_max(pfn, cnt)
60 60
61 61 /*
62 62  * These macros are used in dealing with the page counters and their candidate
63 63  * counters. These are used as part of coalescing our free lists.
64 64 */
65 65
66 66 /*
67 67  * The maximum number of memory ranges that exist in the system. On i86pc,
68 68  * various ranges exist due to legacy DMA, e.g. < 16 MB and < 4 GB for PCI.
69 69  * Like sun4, this may really just be a single range, since unlike sun4 we
70 70  * are not going to pretend that we have a kcage.
71 71 */
72 72 #define MAX_MNODE_MRANGES 1
73 73 #define MNODE_RANGE_CNT(mnode) 1
74 74 #define MNODE_MAX_MRANGE(mnode) (MAX_MNODE_MRANGES - 1)
75 75 #define MTYPE_2_MRANGE(mnode, mtype) mtype
76 76
77 77
78 78 /*
79 79 * XXX These are strawman definitions based on the i86pc versions of the
80 80 * page_freelists and the page_cachelists; however, unlike i86pc we only have
81 81 * one mtype, therefore we don't bother keeping around an index for it.
82 82 *
83 83 * We index into the freelist by [mmu_page_sizes][colors]. We index into the
84 84 * cachelist by [colors].
85 85 */
86 86 extern page_t ***page_freelists;
87 87 extern page_t **page_cachelists;
88 88
89 89 #define PAGE_FREELISTS(mnode, szc, color, mtype) \
90 90 (*(page_freelists[szc] + (color)))
91 91 #define PAGE_CACHELISTS(mnode, color, mtype) \
92 92 (page_cachelists[color])
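A hypothetical expansion of the indexing described above, using szc 1 and color 3 (the mnode and mtype arguments are ignored since there is only one of each):

        PAGE_FREELISTS(0, 1, 3, 0)  ->  *(page_freelists[1] + (3))
        PAGE_CACHELISTS(0, 3, 0)    ->  page_cachelists[3]

i.e. the freelist head for color 3 within size class 1, and the cachelist head for color 3.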
93 93
94 94 /*
95 95 * XXX This set of locks needs to be rethought with respect to mandatory page
96 96  * coloring. It was taken rather naively from i86pc.
97 97 */
98 98
99 99 /*
100 100 * There are mutexes for both the page freelist
101 101 * and the page cachelist. We want enough locks to make contention
102 102 * reasonable, but not too many -- otherwise page_freelist_lock() gets
103 103 * so expensive that it becomes the bottleneck!
104 104 */
105 105
106 106 #define NPC_MUTEX 16
107 107
108 108 extern kmutex_t *fpc_mutex[NPC_MUTEX];
109 109 extern kmutex_t *cpc_mutex[NPC_MUTEX];
110 110
111 111 #define PC_BIN_MUTEX(mnode, bin, flags) ((flags & PG_FREE_LIST) ? \
112 112 &fpc_mutex[(bin) & (NPC_MUTEX - 1)][mnode] : \
113 113 &cpc_mutex[(bin) & (NPC_MUTEX - 1)][mnode])
114 114
115 115 #define FPC_MUTEX(mnode, i) (&fpc_mutex[i][mnode])
116 116 #define CPC_MUTEX(mnode, i) (&cpc_mutex[i][mnode])
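A hypothetical expansion of the bin-to-mutex hashing, assuming mnode 0 and bin 21:

        PC_BIN_MUTEX(0, 21, PG_FREE_LIST)
            ->  &fpc_mutex[21 & (NPC_MUTEX - 1)][0]
            ->  &fpc_mutex[5][0]

so every bin whose low four bits are 5 shares that freelist mutex.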
117 117
118 118 /*
119 119  * Memory node iterators. We may need something here related to colors, but we
120 120  * may not. For the time being, just panic on use so we can get back to this later.
121 121 */
122 122 #define MEM_NODE_ITERATOR_DECL(it) panic("mem_node_iterator_decl")
123 123 #define MEM_NODE_ITERATOR_INIT(pfn, mnode, szc, it) panic("mem_node_iterator_init")
124 124
125 125 /*
126 126  * XXX Do we ever interleave memory nodes on armv7? Probably not? Does coloring
127 127 * come into play here?
128 128 */
129 129 #define HPM_COUNTERS_LIMITS(mnodes, physbase, physmax, first) \
130 130 panic("hpm_counters_list")
131 131
132 132 #define PAGE_CTRS_WRITE_LOCK(mnode) panic("page_ctrs_write_lock")
133 133 #define PAGE_CTRS_WRITE_UNLOCK(mnode) panic("page_ctrs_write_unlock")
134 134 #define PAGE_CTRS_ADJUST(pfn, cnt, rv) panic("page_ctrs_adjust")
135 135
136 136 /*
137 137 * Coloring related macros. For more on coloring, see uts/armv7/vm/vm_machdep.c.
138 138 */
139 139 #define PAGE_GET_COLOR_SHIFT(szc, nszc) \
140 140 (hw_page_array[(nszc)].hp_shift - hw_page_array[(szc)].hp_shift)
141 141
142 142 #define PAGE_CONVERT_COLOR(ncolor, szc, nszc) \
143 143 ((ncolor) << PAGE_GET_COLOR_SHIFT((szc), (nszc)))
144 144
145 145 #define PFN_2_COLOR(pfn, szc, it) \
146 146 (((pfn) & page_colors_mask) >> \
147 147 (hw_page_array[szc].hp_shift - hw_page_array[0].hp_shift))
148 148
149 149 #define PNUM_SIZE(szc) \
150 150 (hw_page_array[(szc)].hp_pgcnt)
151 151 #define PNUM_SHIFT(szc) \
152 152 (hw_page_array[(szc)].hp_shift - hw_page_array[0].hp_shift)
153 153 #define PAGE_GET_SIZE(szc) \
154 154 (hw_page_array[(szc)].hp_size)
155 155 #define PAGE_GET_SHIFT(szc) \
156 156 (hw_page_array[(szc)].hp_shift)
157 157 #define PAGE_GET_PAGECOLORS(szc) \
158 158 (hw_page_array[(szc)].hp_colors)
159 159
160 160 #define PAGE_NEXT_PFN_FOR_COLOR(pfn, szc, color, ceq_mask, color_mask, it) \
161 161 panic("page_next_pfn_for_color")
162 162
163 163 /* get the color equivalency mask for the next szc */
164 164 #define PAGE_GET_NSZ_MASK(szc, mask) \
165 165 ((mask) >> (PAGE_GET_SHIFT((szc) + 1) - PAGE_GET_SHIFT(szc)))
166 166
167 167 /* get the color of the next szc */
168 168 #define PAGE_GET_NSZ_COLOR(szc, color) \
169 169 ((color) >> (PAGE_GET_SHIFT((szc) + 1) - PAGE_GET_SHIFT(szc)))
170 170
171 171 /* Find the bin for the given page if it was of size szc */
172 172 #define PP_2_BIN_SZC(pp, szc) (PFN_2_COLOR(pp->p_pagenum, szc, NULL))
173 173
174 174 #define PP_2_BIN(pp) (PP_2_BIN_SZC(pp, pp->p_szc))
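A worked example of the color calculation, using hypothetical values (16 page colors, so page_colors_mask is 0xf, and szc 0, so the hp_shift difference is zero):

        PFN_2_COLOR(0x12345, 0, NULL)  ->  (0x12345 & 0xf) >> 0  ->  0x5

PP_2_BIN(pp) then runs the page's own p_pagenum and p_szc through the same macro to pick its freelist bin.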
175 175
176 176 #define PP_2_MEM_NODE(pp) (0)
177 177 #define PP_2_MTYPE(pp) (0)
178 178 #define PP_2_SZC(pp) (pp->p_szc)
179 179
180 180 #define SZCPAGES(szc) (1 << PAGE_BSZS_SHIFT(szc))
181 181 #define PFN_BASE(pfnum, szc) (pfnum & ~(SZCPAGES(szc) - 1))
182 182
183 183 /*
184 184 * XXX These are total strawmen based on i86pc and sun4 for walking the page
185 185 * tables.
186 186 */
187 187 typedef struct page_list_walker {
188 188 uint_t plw_colors; /* num of colors for szc */
189 189 uint_t plw_color_mask; /* colors-1 */
190 190 uint_t plw_bin_step; /* next bin: 1 or 2 */
191 191 uint_t plw_count; /* loop count */
192 192 uint_t plw_bin0; /* starting bin */
193 193 uint_t plw_bin_marker; /* bin after initial jump */
194 194 uint_t plw_bin_split_prev; /* last bin we tried to split */
195 195 uint_t plw_do_split; /* set if OK to split */
196 196 uint_t plw_split_next; /* next bin to split */
197 197 uint_t plw_ceq_dif; /* number of different color groups */
198 198 /* to check */
199 199 uint_t plw_ceq_mask[MMU_PAGE_SIZES + 1]; /* color equiv mask */
200 200 uint_t plw_bins[MMU_PAGE_SIZES + 1]; /* num of bins */
201 201 } page_list_walker_t;
202 202
203 203 extern void page_list_walk_init(uchar_t szc, uint_t flags, uint_t bin,
204 204 int can_split, int use_ceq, page_list_walker_t *plw);
205 205
206 206 extern struct cpu cpus[];
207 207 #define CPU0 &cpus[0]
208 208
209 209 /*
210 210  * XXX memory type initialization
211 211 */
212 212 #define MTYPE_INIT(mtype, vp, vaddr, flags, pgsz) panic("mtype_init")
213 213 #define MTYPE_START(mnode, mtype, flags) panic("mtype_start")
214 214 #define MTYPE_NEXT(mnode, mtype, flags) panic("mtype_next")
215 215 #define MTYPE_PGR_INIT(mtype, flags, pp, mnode, pgcnt) panic("mtype_pgr_init")
216 216 #define MNODETYPE_2_PFN(mnode, mtype, pfnlo, pfnhi) panic("mnodetype_2_pfn")
217 217
218 218 #ifdef DEBUG
219 219 #define CHK_LPG(pp, szc) panic("chk_lpg")
220 220 #else
221 221 #define CHK_LPG(pp, szc)
222 222 #endif
223 223
224 224 #define FULL_REGION_CNT(rg_szc) \
225 225 (PAGE_GET_SIZE(rg_szc) >> PAGE_GET_SHIFT(rg_szc - 1))
226 226
227 227 /* Return the leader for this mapping size */
228 228 #define PP_GROUPLEADER(pp, szc) \
229 229 (&(pp)[-(int)((pp)->p_pagenum & (SZCPAGES(szc)-1))])
230 230
231 231 /* Return the root page for this page based on p_szc */
232 232 #define PP_PAGEROOT(pp) ((pp)->p_szc == 0 ? (pp) : \
233 233 PP_GROUPLEADER((pp), (pp)->p_szc))
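A hypothetical walk-through of these macros, assuming 4 KB base pages and a 1 MB size-class-1 page, so that SZCPAGES(1) works out to 256:

        FULL_REGION_CNT(1)  ->  0x100000 >> 12  ->  256 size-class-0 pages per region
        PP_GROUPLEADER(pp, 1), with pp->p_pagenum == 0x1003
            ->  &pp[-(0x1003 & 0xff)]  ->  &pp[-3]  (the page at pfn 0x1000)

PP_PAGEROOT() just applies PP_GROUPLEADER() using the page's own p_szc, returning the page itself when p_szc is zero.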
234 234
235 235 /*
236 236 * The counter base must be per page_counter element to prevent
237 237 * races when re-indexing, and the base page size element should
238 238 * be aligned on a boundary of the given region size.
239 239 *
240 240 * We also round up the number of pages spanned by the counters
241 241 * for a given region to PC_BASE_ALIGN in certain situations to simplify
242 242 * the coding for some non-performance critical routines.
243 243 */
244 244 #define PC_BASE_ALIGN ((pfn_t)1 << PAGE_BSZS_SHIFT(mmu_page_sizes-1))
245 245 #define PC_BASE_ALIGN_MASK (PC_BASE_ALIGN - 1)
246 246
247 247 /*
248 248 * The following three constants describe the set of page sizes that are
249 249 * supported by the hardware. Note that there is a notion of legacy page sizes
250 250 * for certain applications. However, such applications don't exist on ARMv7, so
251 251 * they'll always get the same data.
252 252 */
253 253 extern uint_t mmu_page_sizes;
254 254 extern uint_t mmu_exported_page_sizes;
255 255 extern uint_t mmu_legacy_page_sizes;
256 256
257 257 /*
258 258 * These macros are used for converting between userland page sizes and kernel
259 259 * page sizes. However, these are the same on ARMv7 (just like i86pc).
260 260 */
261 261 #define USERSZC_2_SZC(userszc) userszc
262 262 #define SZC_2_USERSZC(szc) szc
263 263
264 264 /*
265 265 * for hw_page_map_t, sized to hold the ratio of large page to base
266 266 * pagesize
267 267 */
268 268 typedef short hpmctr_t;
269 269
270 270 /*
271 - * On ARMv6 the layer two cache isn't architecturally defined. A given
272 - * implementation may or may not support it. The maximum size appears to be
273 - * 64-bytes; however, we end up having to defer to the individual platforms for
274 - * more information. Because of this, we also get and use the l1 cache
275 - * information. This is further complicated by the fact that the I-cache and
276 - * D-cache are separate usually; therefore we us the the l1 d-cache for
277 - * CPUSETSIZE().
271 + * get the setsize of the current cpu
272 + *
273 + * This is complicated by the fact that the I-cache and D-cache may be
274 + * separate.
278 275 */
279 -extern int armv6_cachesz, armv6_cache_assoc;
280 -extern int armv6_l2cache_size, armv6_l2cache_linesz;
281 -#define L2CACHE_ALIGN armv6_l2cache_linesz
276 +extern int l2cache_sz, l2cache_linesz, l2cache_assoc;
277 +#define L2CACHE_ALIGN l2cache_linesz
282 278 #define L2CACHE_ALIGN_MAX 64
283 -#define CPUSETSIZE() (armv6_cachesz / armv6_cache_assoc)
279 +#define CPUSETSIZE() (l2cache_sz / l2cache_assoc)
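As a worked example with a hypothetical 256 KB, 8-way set-associative L2 cache with 64-byte lines:

        L2CACHE_ALIGN  ->  l2cache_linesz              ->  64
        CPUSETSIZE()   ->  l2cache_sz / l2cache_assoc  ->  262144 / 8  ->  32768

i.e. a 32 KB way size, the stride at which addresses alias into the same cache set.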
284 280
285 281 /*
286 282 * Return the log2(pagesize(szc) / MMU_PAGESIZE) --- or the shift count
287 283 * for the number of base pages in this pagesize
288 284 */
289 285 #define PAGE_BSZS_SHIFT(szc) (PNUM_SHIFT(szc) - MMU_PAGESHIFT)
290 286
291 287 /*
292 288 * Internal PG_ flags.
293 289 */
294 290 #define PGI_RELOCONLY 0x010000 /* opposite of PG_NORELOC */
295 291 #define PGI_NOCAGE 0x020000 /* cage is disabled */
296 292 #define PGI_PGCPHIPRI 0x040000 /* page_get_contig_page pri alloc */
297 293 #define PGI_PGCPSZC0 0x080000 /* relocate base pagesize page */
298 294
299 295 /*
300 296 * XXX Consider PGI flags for ourselves
301 297 */
302 298
303 299 #define AS_2_BIN(as, seg, vp, addr, bin, szc) panic("as_2_bin")
304 300
305 301 /*
306 302 * XXX For the moment, we'll use the same value for VM_CPU_DATA_PADSIZE that
307 303 * is used on other platforms. We don't use this at all, but it's required for
308 304 * stuff like vm_pagelist.c to build. We should figure out what the right answer
309 305 * looks like here.
310 306 */
311 307 /*
312 308 * cpu private vm data - accessed thru CPU->cpu_vm_data
313 309 * vc_pnum_memseg: tracks last memseg visited in page_numtopp_nolock()
314 310 * vc_pnext_memseg: tracks last memseg visited in page_nextn()
315 311  * vc_kmptr: original unaligned kmem pointer for this vm_cpu_data_t
316 312  * vc_kmsize: original kmem size for this vm_cpu_data_t
317 313 */
318 314
319 315 typedef struct {
320 316 struct memseg *vc_pnum_memseg;
321 317 struct memseg *vc_pnext_memseg;
322 318 void *vc_kmptr;
323 319 size_t vc_kmsize;
324 320 } vm_cpu_data_t;
325 321
326 322 /* allocation size to ensure vm_cpu_data_t resides in its own cache line */
327 323 #define VM_CPU_DATA_PADSIZE \
328 324 (P2ROUNDUP(sizeof (vm_cpu_data_t), L2CACHE_ALIGN_MAX))
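Assuming 32-bit pointers and size_t, sizeof (vm_cpu_data_t) would be 16 bytes, so the padded size works out to:

        VM_CPU_DATA_PADSIZE  ->  P2ROUNDUP(16, 64)  ->  64

which keeps each CPU's vm_cpu_data_t in its own L2CACHE_ALIGN_MAX-sized line.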
329 325
330 326 /*
331 327 * When a bin is empty, and we can't satisfy a color request correctly,
332 328 * we scan. If we assume that the programs have reasonable spatial
333 329 * behavior, then it will not be a good idea to use the adjacent color.
334 330 * Using the adjacent color would result in virtually adjacent addresses
335 331 * mapping into the same spot in the cache. So, if we stumble across
336 332 * an empty bin, skip a bunch before looking. After the first skip,
337 333 * then just look one bin at a time so we don't miss our cache on
338 334 * every look. Be sure to check every bin. Page_create() will panic
339 335 * if we miss a page.
340 336 *
341 337 * This also explains the `<=' in the for loops in both page_get_freelist()
342 338 * and page_get_cachelist(). Since we checked the target bin, skipped
343 339  * a bunch, then continued one at a time, we wind up checking the target bin
344 340  * twice to make sure we get all of the bins.
345 341 */
346 342 #define BIN_STEP 19
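To make the scan pattern concrete, suppose (hypothetically) 32 bins and a request starting at bin 2: bin 2 is checked first; if it is empty, the scan jumps ahead BIN_STEP bins to bin (2 + 19) % 32 == 21 and then proceeds one bin at a time, wrapping around until bin 2 is checked again, which is the reason for the `<=' in the loops.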
347 343
348 344 /*
349 345 * TODO We should re-evaluate this at some point. This is a reasonable set of
350 346  * stats that both i86pc and sun4 have, most of which the common code likely
351 347  * requires. We may find that we want additional stats here.
352 348 */
353 349 #ifdef VM_STATS
354 350 struct vmm_vmstats_str {
355 351 ulong_t pgf_alloc[MMU_PAGE_SIZES]; /* page_get_freelist */
356 352 ulong_t pgf_allocok[MMU_PAGE_SIZES];
357 353 ulong_t pgf_allocokrem[MMU_PAGE_SIZES];
358 354 ulong_t pgf_allocfailed[MMU_PAGE_SIZES];
359 355 ulong_t pgf_allocdeferred;
360 356 ulong_t pgf_allocretry[MMU_PAGE_SIZES];
361 357 ulong_t pgc_alloc; /* page_get_cachelist */
362 358 ulong_t pgc_allocok;
363 359 ulong_t pgc_allocokrem;
364 360 ulong_t pgc_allocokdeferred;
365 361 ulong_t pgc_allocfailed;
366 362 ulong_t pgcp_alloc[MMU_PAGE_SIZES]; /* page_get_contig_pages */
367 363 ulong_t pgcp_allocfailed[MMU_PAGE_SIZES];
368 364 ulong_t pgcp_allocempty[MMU_PAGE_SIZES];
369 365 ulong_t pgcp_allocok[MMU_PAGE_SIZES];
370 366 ulong_t ptcp[MMU_PAGE_SIZES]; /* page_trylock_contig_pages */
371 367 ulong_t ptcpfreethresh[MMU_PAGE_SIZES];
372 368 ulong_t ptcpfailexcl[MMU_PAGE_SIZES];
373 369 ulong_t ptcpfailszc[MMU_PAGE_SIZES];
374 370 ulong_t ptcpfailcage[MMU_PAGE_SIZES];
375 371 ulong_t ptcpok[MMU_PAGE_SIZES];
376 372 ulong_t pgmf_alloc[MMU_PAGE_SIZES]; /* page_get_mnode_freelist */
377 373 ulong_t pgmf_allocfailed[MMU_PAGE_SIZES];
378 374 ulong_t pgmf_allocempty[MMU_PAGE_SIZES];
379 375 ulong_t pgmf_allocok[MMU_PAGE_SIZES];
380 376 ulong_t pgmc_alloc; /* page_get_mnode_cachelist */
381 377 ulong_t pgmc_allocfailed;
382 378 ulong_t pgmc_allocempty;
383 379 ulong_t pgmc_allocok;
384 380 ulong_t pladd_free[MMU_PAGE_SIZES]; /* page_list_add/sub */
385 381 ulong_t plsub_free[MMU_PAGE_SIZES];
386 382 ulong_t pladd_cache;
387 383 ulong_t plsub_cache;
388 384 ulong_t plsubpages_szcbig;
389 385 ulong_t plsubpages_szc0;
390 386 ulong_t pfs_req[MMU_PAGE_SIZES]; /* page_freelist_split */
391 387 ulong_t pfs_demote[MMU_PAGE_SIZES];
392 388 ulong_t pfc_coalok[MMU_PAGE_SIZES][MAX_MNODE_MRANGES];
393 389 ulong_t ppr_reloc[MMU_PAGE_SIZES]; /* page_relocate */
394 390 ulong_t ppr_relocnoroot[MMU_PAGE_SIZES];
395 391 ulong_t ppr_reloc_replnoroot[MMU_PAGE_SIZES];
396 392 ulong_t ppr_relocnolock[MMU_PAGE_SIZES];
397 393 ulong_t ppr_relocnomem[MMU_PAGE_SIZES];
398 394 ulong_t ppr_relocok[MMU_PAGE_SIZES];
399 395 ulong_t ppr_copyfail;
400 396 /* page coalesce counter */
401 397 ulong_t page_ctrs_coalesce[MMU_PAGE_SIZES][MAX_MNODE_MRANGES];
402 398 /* candidates useful */
403 399 ulong_t page_ctrs_cands_skip[MMU_PAGE_SIZES][MAX_MNODE_MRANGES];
404 400 /* ctrs changed after locking */
405 401 ulong_t page_ctrs_changed[MMU_PAGE_SIZES][MAX_MNODE_MRANGES];
406 402 /* page_freelist_coalesce failed */
407 403 ulong_t page_ctrs_failed[MMU_PAGE_SIZES][MAX_MNODE_MRANGES];
408 404 ulong_t page_ctrs_coalesce_all; /* page coalesce all counter */
409 405 ulong_t page_ctrs_cands_skip_all; /* candidates useful for all func */
410 406 ulong_t restrict4gcnt;
411 407 ulong_t unrestrict16mcnt; /* non-DMA 16m allocs allowed */
412 408 ulong_t pgpanicalloc; /* PG_PANIC allocation */
413 409 ulong_t pcf_deny[MMU_PAGE_SIZES]; /* page_chk_freelist */
414 410 ulong_t pcf_allow[MMU_PAGE_SIZES];
415 411 };
416 412 extern struct vmm_vmstats_str vmm_vmstats;
417 413 #endif /* VM_STATS */
418 414
419 415
420 416 #ifdef __cplusplus
421 417 }
422 418 #endif
423 419
424 420 #endif /* _VM_DEP_H */