	 */
	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}

	mnode = PFN_2_MEM_NODE(start);
	ASSERT(mnode >= 0 && mnode < max_mem_nodes);

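	/*
	 * atomic_cas_32() both tests and sets the "exists" flag in a
	 * single step, so two threads adding slices concurrently cannot
	 * both believe they are creating the same memnode.
	 */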
	if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
		/*
		 * Add slice to existing node.
		 */
		if (start < mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = start;
		if (end > mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = end;
	} else {
		mem_node_config[mnode].physbase = start;
		mem_node_config[mnode].physmax = end;
		atomic_add_16(&num_memnodes, 1);
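		/*
		 * Set this memnode's bit in memnodes_mask with a lock-free
		 * compare-and-swap loop; retry if another CPU updated the
		 * mask in the meantime.
		 */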
		do {
			oldmask = memnodes_mask;
			newmask = memnodes_mask | (1ull << mnode);
		} while (atomic_cas_64(&memnodes_mask, oldmask, newmask) !=
		    oldmask);
	}

	/*
	 * Inform the common lgrp framework about the new memory
	 */
	lgrp_config(LGRP_CONFIG_MEM_ADD, mnode, MEM_NODE_2_LGRPHAND(mnode));
}

/*
 * Remove a PFN range from a memnode. On some platforms,
 * the memnode will be created with physbase at the first
 * allocatable PFN, but later deleted with the MC slice
 * base address converted to a PFN, in which case we need
 * to assume physbase and up.
 */
void
mem_node_del_slice(pfn_t start, pfn_t end)
{
	int mnode;
	pgcnt_t delta_pgcnt, node_size;
	mnodeset_t omask, nmask;

	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}
	mnode = PFN_2_MEM_NODE(start);

	ASSERT(mnode >= 0 && mnode < max_mem_nodes);
	ASSERT(mem_node_config[mnode].exists == 1);

	delta_pgcnt = end - start;
	node_size = mem_node_config[mnode].physmax -
	    mem_node_config[mnode].physbase;

	if (node_size > delta_pgcnt) {
		/*
		 * Subtract the slice from the memnode.
		 */
		if (start <= mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = end + 1;
		ASSERT(end <= mem_node_config[mnode].physmax);
		if (end == mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = start - 1;
	} else {
		/*
		 * Let the common lgrp framework know this mnode is
		 * leaving
		 */
		lgrp_config(LGRP_CONFIG_MEM_DEL,
		    mnode, MEM_NODE_2_LGRPHAND(mnode));

		/*
		 * Delete the whole node.
		 */
		ASSERT(MNODE_PGCNT(mnode) == 0);
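		/*
		 * Atomically clear this memnode's bit in memnodes_mask,
		 * mirroring the set done in mem_node_add_slice().
		 */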
		do {
			omask = memnodes_mask;
			nmask = omask & ~(1ull << mnode);
		} while (atomic_cas_64(&memnodes_mask, omask, nmask) != omask);
		atomic_add_16(&num_memnodes, -1);
		mem_node_config[mnode].exists = 0;
	}
}
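
/*
 * mem_node_add_range()/mem_node_del_range() prefer the platform-specific
 * plat_slice_add()/plat_slice_del() hooks (weak symbols, so their address
 * is non-NULL only when the platform defines them) and fall back to the
 * generic slice routines otherwise.
 */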
void
mem_node_add_range(pfn_t start, pfn_t end)
{
	if (&plat_slice_add)
		plat_slice_add(start, end);
	else
		mem_node_add_slice(start, end);
}

void
mem_node_del_range(pfn_t start, pfn_t end)
{
	if (&plat_slice_del)
		plat_slice_del(start, end);
	else
		mem_node_del_slice(start, end);
}

int
mem_node_alloc()
{
	int mnode;
	mnodeset_t newmask, oldmask;

	/*
	 * Find an unused memnode. Update it atomically to prevent
	 * a first-time memnode creation race.
	 */
	for (mnode = 0; mnode < max_mem_nodes; mnode++)
		if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists,
		    0, 1) == 0)
			break;

	if (mnode >= max_mem_nodes)
		panic("Out of free memnodes\n");

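	/*
	 * Start with an inverted (empty) range: physbase at the highest
	 * possible PFN and physmax at 0, so the first slice added will
	 * establish the real bounds.
	 */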
	mem_node_config[mnode].physbase = (pfn_t)-1l;
	mem_node_config[mnode].physmax = 0;
	atomic_add_16(&num_memnodes, 1);
	do {
		oldmask = memnodes_mask;
		newmask = memnodes_mask | (1ull << mnode);
	} while (atomic_cas_64(&memnodes_mask, oldmask, newmask) != oldmask);

	return (mnode);
}

/*
 * Find the intersection between a memnode and a memlist
 * and return the number of pages that overlap.
 *
 * Assumes the list is protected from DR operations by
 * the memlist lock.
 */
pgcnt_t
mem_node_memlist_pages(int mnode, struct memlist *mlist)
{
	pfn_t base, end;
	pfn_t cur_base, cur_end;