 */
void
mem_node_add_slice(pfn_t start, pfn_t end)
{
	int mnode;
	mnodeset_t newmask, oldmask;

	/*
	 * DR will pass us the first pfn that is allocatable.
	 * We need to round down to get the real start of
	 * the slice.
	 */
	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}
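	/*
	 * Worked example with hypothetical values: for a 256MB slice
	 * size and 8K pages, btop(mem_node_physalign) is 0x8000 pfns,
	 * so a DR-supplied range of [0x42010, 0x42fff] widens to the
	 * full slice [0x40000, 0x47fff]: start is masked down with
	 * ~0x7fff and end is rounded up to the next 0x8000 boundary,
	 * minus one.
	 */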

	mnode = PFN_2_MEM_NODE(start);
	ASSERT(mnode < max_mem_nodes);

	if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
		/*
		 * Add slice to existing node.
		 */
		if (start < mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = start;
		if (end > mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = end;
	} else {
		mem_node_config[mnode].physbase = start;
		mem_node_config[mnode].physmax = end;
		atomic_add_16(&num_memnodes, 1);
		do {
			oldmask = memnodes_mask;
			newmask = memnodes_mask | (1ull << mnode);
		} while (atomic_cas_64(&memnodes_mask, oldmask, newmask) !=
		    oldmask);
	}

	/*
	 * Let the common lgrp framework know about the new memory.
	 */
	lgrp_config(LGRP_CONFIG_MEM_ADD, mnode, MEM_NODE_2_LGRPHAND(mnode));
}
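
/*
 * The memnodes_mask update above is the classic compare-and-swap retry
 * loop: atomic_cas_64() stores newmask only if memnodes_mask still
 * equals oldmask and returns the value it found, so the loop repeats
 * until the snapshot it read is the value it replaced.  The guarded
 * helper below is an illustrative sketch of the same idiom in
 * isolation; the function name is hypothetical and is never built.
 */
#ifdef	MEM_NODE_EXAMPLES
static void
example_set_memnode_bit(int mnode)
{
	mnodeset_t oldmask, newmask;

	do {
		oldmask = memnodes_mask;
		newmask = oldmask | (1ull << mnode);
	} while (atomic_cas_64(&memnodes_mask, oldmask, newmask) != oldmask);
}
#endif	/* MEM_NODE_EXAMPLES */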

/*
 * Remove a PFN range from a memnode.  On some platforms,
 * the memnode will be created with physbase at the first
 * allocatable PFN, but later deleted with the MC slice
 * base address converted to a PFN, in which case we need
 * to assume physbase and up.
 */
void
mem_node_del_slice(pfn_t start, pfn_t end)
{
	int mnode;
	pgcnt_t delta_pgcnt, node_size;
	mnodeset_t omask, nmask;

	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}
	mnode = PFN_2_MEM_NODE(start);

	ASSERT(mnode < max_mem_nodes);
	ASSERT(mem_node_config[mnode].exists == 1);

	delta_pgcnt = end - start;
	node_size = mem_node_config[mnode].physmax -
	    mem_node_config[mnode].physbase;

	if (node_size > delta_pgcnt) {
		/*
		 * Subtract the slice from the memnode.  It doesn't
		 * matter whether it contained the middle, the end,
		 * or the beginning of the range.
		 */
		if (start == mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = end + 1;
		ASSERT(end <= mem_node_config[mnode].physmax);
		if (end == mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = start - 1;
	} else {

		/*
		 * Let the common lgrp framework know the mnode is
		 * leaving.
		 */
		lgrp_config(LGRP_CONFIG_MEM_DEL, mnode,
		    MEM_NODE_2_LGRPHAND(mnode));

		/*
		 * Delete the whole node.
		 */
		ASSERT(MNODE_PGCNT(mnode) == 0);
		do {
			omask = memnodes_mask;
			nmask = omask & ~(1ull << mnode);
		} while (atomic_cas_64(&memnodes_mask, omask, nmask) != omask);
		atomic_add_16(&num_memnodes, -1);
		mem_node_config[mnode].exists = 0;
	}
}
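
/*
 * Illustrative sketch of a caller: a platform DR detach path might
 * convert an MC slice base address and size to a pfn range before
 * removing it.  The guarded helper and its parameter names are
 * hypothetical and never built.
 */
#ifdef	MEM_NODE_EXAMPLES
static void
example_detach_slice(uint64_t slice_base, uint64_t slice_size)
{
	pfn_t base = btop(slice_base);

	mem_node_del_slice(base, base + btop(slice_size) - 1);
}
#endif	/* MEM_NODE_EXAMPLES */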

void
mem_node_add_range(pfn_t start, pfn_t end)
{
	if (&plat_slice_add != NULL)
		plat_slice_add(start, end);
	else
		mem_node_add_slice(start, end);
}
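
/*
 * The `&plat_slice_add != NULL' test works because plat_slice_add is
 * declared as a weak symbol; on platforms that do not provide it, the
 * address resolves to NULL and the generic slice code runs.  The
 * guarded function below is an illustrative sketch of a platform
 * override (a real one would be exported as plat_slice_add() from
 * platform code); it is hypothetical and never built.
 */
#ifdef	MEM_NODE_EXAMPLES
static void
example_plat_slice_add(pfn_t start, pfn_t end)
{
	/* A platform might translate or align the range first. */
	mem_node_add_slice(start, end);
}
#endif	/* MEM_NODE_EXAMPLES */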

void
mem_node_del_range(pfn_t start, pfn_t end)
{
	if (&plat_slice_del != NULL)
		plat_slice_del(start, end);
	else
		mem_node_del_slice(start, end);
}

void
startup_build_mem_nodes(prom_memlist_t *list, size_t nelems)
{
	size_t	elem;
	pfn_t	basepfn;
	pgcnt_t	npgs;

	/* LINTED: ASSERT will always true or false */
	ASSERT(NBBY * sizeof (mnodeset_t) >= max_mem_nodes);

	if (&plat_build_mem_nodes != NULL) {
		plat_build_mem_nodes(list, nelems);
	} else {
		/*
		 * Boot install lists are arranged <addr, len>, ...
		 */
		for (elem = 0; elem < nelems; list++, elem++) {
			basepfn = btop(list->addr);
			npgs = btop(list->size);
			mem_node_add_range(basepfn, basepfn + npgs - 1);
		}
	}
}
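
/*
 * Worked example with hypothetical values: a boot list entry with
 * addr 0x80000000 and size 0x10000000 under 8K pages gives
 * basepfn = btop(0x80000000) = 0x40000 and npgs = btop(0x10000000) =
 * 0x8000, so the range added covers pfns [0x40000, 0x47fff].
 */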

/*
 * Allocate an unassigned memnode.
 */
int
mem_node_alloc(void)
{
	int mnode;
	mnodeset_t newmask, oldmask;

	/*
	 * Find an unused memnode.  Update it atomically to prevent
	 * a first time memnode creation race.
	 */
	for (mnode = 0; mnode < max_mem_nodes; mnode++)
		if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists,
		    0, 1) == 0)
			break;

	if (mnode >= max_mem_nodes)
		panic("Out of free memnodes\n");

	mem_node_config[mnode].physbase = (uint64_t)-1;
	mem_node_config[mnode].physmax = 0;
	atomic_add_16(&num_memnodes, 1);
	do {
		oldmask = memnodes_mask;
		newmask = memnodes_mask | (1ull << mnode);
	} while (atomic_cas_64(&memnodes_mask, oldmask, newmask) != oldmask);

	return (mnode);
}
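
/*
 * A freshly allocated memnode starts with the empty range
 * physbase = (uint64_t)-1, physmax = 0, so the first slice added to it
 * always wins both the "start < physbase" and "end > physmax"
 * comparisons in mem_node_add_slice().  The guarded helper below is an
 * illustrative sketch of a caller; the name is hypothetical, it is
 * never built, and the platform-specific pfn-to-mnode wiring is elided.
 */
#ifdef	MEM_NODE_EXAMPLES
static int
example_grow_new_memnode(pfn_t start, pfn_t end)
{
	int mnode = mem_node_alloc();

	/*
	 * A platform would set up its pfn-to-mnode translation here so
	 * that PFN_2_MEM_NODE(start) == mnode, then populate the node.
	 */
	mem_node_add_slice(start, end);
	return (mnode);
}
#endif	/* MEM_NODE_EXAMPLES */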

/*
 * Find the intersection between a memnode and a memlist
 * and return the number of pages that overlap.
 *
 * Grab the memlist lock to protect the list from DR operations.
 */
pgcnt_t
mem_node_memlist_pages(int mnode, struct memlist *mlist)
{
	pfn_t base, end;
	pfn_t cur_base, cur_end;
	pgcnt_t npgs = 0;
	pgcnt_t pages;
	struct memlist *pmem;

	if (&plat_mem_node_intersect_range != NULL) {