/*
 * Platform hooks we will need.
 */

#pragma weak plat_build_mem_nodes
#pragma weak plat_slice_add
#pragma weak plat_slice_del

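/*
 * These hooks are weak symbols: when no platform module defines them
 * they resolve to NULL at runtime, which is what the
 * "&plat_slice_add != NULL" style tests below rely on to fall back
 * to the generic code.
 */
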
/*
 * Adjust the memnode config after a DR operation.
 *
 * It is rather tricky to do these updates since we can't
 * protect the memnode structures with locks, so we must
 * be mindful of the order in which updates to, and reads of,
 * these values can occur.
 */
void
mem_node_add_slice(pfn_t start, pfn_t end)
{
	int mnode;
	mnodeset_t newmask, oldmask;

	/*
	 * DR will pass us the first pfn that is allocatable.
	 * We need to round down to get the real start of
	 * the slice.
	 */
	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}

	mnode = PFN_2_MEM_NODE(start);
	ASSERT(mnode < max_mem_nodes);

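	/*
	 * atomic_cas_32() returns the previous value of .exists:
	 * nonzero means the node already existed, so we only grow its
	 * bounds; zero means we have just claimed a brand-new node.
	 */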
	if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
		/*
		 * Add slice to existing node.
		 */
		if (start < mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = start;
		if (end > mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = end;
	} else {
		mem_node_config[mnode].physbase = start;
		mem_node_config[mnode].physmax = end;
		atomic_inc_16(&num_memnodes);
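		/*
		 * Set this mnode's bit in memnodes_mask with a
		 * lock-free CAS retry loop, since another CPU may
		 * update the mask concurrently and we have no lock
		 * to serialize with.
		 */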
		do {
			oldmask = memnodes_mask;
			newmask = memnodes_mask | (1ull << mnode);
		} while (atomic_cas_64(&memnodes_mask, oldmask, newmask) !=
		    oldmask);
	}
	/*
	 * Let the common lgrp framework know about the new memory.
	 */
	lgrp_config(LGRP_CONFIG_MEM_ADD, mnode, MEM_NODE_2_LGRPHAND(mnode));
}

/*
 * Remove a PFN range from a memnode. On some platforms,
 * the memnode will be created with physbase at the first
 * allocatable PFN, but later deleted with the MC slice
 * base address converted to a PFN, in which case we need
 * to assume physbase and up.
 */
void
mem_node_del_slice(pfn_t start, pfn_t end)
{
	int mnode;
	pgcnt_t delta_pgcnt, node_size;
	mnodeset_t omask, nmask;

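	/*
	 * Round the range out to mem_node_physalign boundaries, just
	 * as mem_node_add_slice() does above.
	 */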
	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}
	mnode = PFN_2_MEM_NODE(start);

	ASSERT(mnode < max_mem_nodes);
	ASSERT(mem_node_config[mnode].exists == 1);

	delta_pgcnt = end - start;
	node_size = mem_node_config[mnode].physmax -
	    mem_node_config[mnode].physbase;

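	/*
	 * Both delta_pgcnt and node_size are end - start deltas, so
	 * this comparison tells us whether the slice covers less than
	 * the node's whole [physbase, physmax] span.
	 */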
	if (node_size > delta_pgcnt) {
		/*
		 * Subtract the slice from the memnode.
		 */
		if (start <= mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = end + 1;
		ASSERT(end <= mem_node_config[mnode].physmax);
		if (end == mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = start - 1;
	} else {
		/*
		 * Let the common lgrp framework know the mnode is
		 * leaving.
		 */
		lgrp_config(LGRP_CONFIG_MEM_DEL, mnode,
		    MEM_NODE_2_LGRPHAND(mnode));

		/*
		 * Delete the whole node.
		 */
		ASSERT(MNODE_PGCNT(mnode) == 0);
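		/*
		 * Clear this mnode's bit in memnodes_mask with the
		 * same lock-free CAS retry pattern used when adding
		 * a node.
		 */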
		do {
			omask = memnodes_mask;
			nmask = omask & ~(1ull << mnode);
		} while (atomic_cas_64(&memnodes_mask, omask, nmask) != omask);
		atomic_dec_16(&num_memnodes);
		mem_node_config[mnode].exists = 0;
	}
}

void
mem_node_add_range(pfn_t start, pfn_t end)
{
	if (&plat_slice_add != NULL)
		plat_slice_add(start, end);
	else
		mem_node_add_slice(start, end);
}

void
mem_node_del_range(pfn_t start, pfn_t end)
{
	if (&plat_slice_del != NULL)
		plat_slice_del(start, end);
	else
		mem_node_del_slice(start, end);
}

/*
 * Let the platform build the memory nodes if it provides a hook;
 * otherwise construct them from the boot memory list below.
 */
void
startup_build_mem_nodes(prom_memlist_t *list, size_t nelems)
{
	size_t	elem;
	pfn_t	basepfn;
	pgcnt_t	npgs;

	if (&plat_build_mem_nodes != NULL) {
		plat_build_mem_nodes(list, nelems);
	} else {
		/*
		 * Boot install lists are arranged <addr, len>, ...
		 */
		for (elem = 0; elem < nelems; list++, elem++) {
			basepfn = btop(list->addr);
			npgs = btop(list->size);
			mem_node_add_range(basepfn, basepfn + npgs - 1);
		}
	}
}

/*
 * Allocate an unassigned memnode.
 */
int
mem_node_alloc()
{
	int mnode;
	mnodeset_t newmask, oldmask;

	/*
	 * Find an unused memnode. Update it atomically to prevent
	 * a first-time memnode creation race.
	 */
	for (mnode = 0; mnode < max_mem_nodes; mnode++)
		if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists,
		    0, 1) == 0)
			break;

	if (mnode >= max_mem_nodes)
		panic("Out of free memnodes\n");

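	/*
	 * Start with an impossible range (physbase above physmax) so
	 * that concurrent readers see an empty node until real bounds
	 * are installed.
	 */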
	mem_node_config[mnode].physbase = (uint64_t)-1;
	mem_node_config[mnode].physmax = 0;
	atomic_inc_16(&num_memnodes);
	do {
		oldmask = memnodes_mask;
		newmask = memnodes_mask | (1ull << mnode);
	} while (atomic_cas_64(&memnodes_mask, oldmask, newmask) != oldmask);

	return (mnode);
}

/*
 * Find the intersection between a memnode and a memlist
 * and return the number of pages that overlap.
 *
 * Grab the memlist lock to protect the list from DR operations.
 */
pgcnt_t
mem_node_memlist_pages(int mnode, struct memlist *mlist)
{
	pfn_t base, end;
	pfn_t cur_base, cur_end;
	pgcnt_t npgs = 0;
	pgcnt_t pages;
	struct memlist *pmem;

	if (&plat_mem_node_intersect_range != NULL) {