FreeBSD kernel kern code
subr_pcpu.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001 Wind River Systems, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Copyright (c) 2009 Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module provides MI support for per-cpu data.
 *
 * Each architecture determines the mapping of logical CPU IDs to physical
 * CPUs.  The requirements of this mapping are as follows:
 *  - Logical CPU IDs must reside in the range 0 ... MAXCPU - 1.
 *  - The mapping is not required to be dense.  That is, there may be
 *    gaps in the mappings.
 *  - The platform sets the value of MAXCPU in <machine/param.h>.
 *  - It is suggested, but not required, that in the non-SMP case, the
 *    platform define MAXCPU to be 1 and define the logical ID of the
 *    sole CPU as 0.
 */
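
/*
 * Illustrative sketch (not part of this file): consumers declare static
 * per-cpu data with the DPCPU_DEFINE()/DPCPU_DEFINE_STATIC() macros from
 * <sys/pcpu.h> and access it with DPCPU_GET()/DPCPU_SET() for the current
 * CPU, or DPCPU_ID_GET()/DPCPU_ID_SET() for an explicit logical CPU ID.
 * The variable and function names below are hypothetical.
 */
#if 0	/* example only */
DPCPU_DEFINE_STATIC(u_long, example_counter);

static void
example_bump(void)
{

        /* Update this CPU's copy; typically done where migration is harmless. */
        DPCPU_SET(example_counter, DPCPU_GET(example_counter) + 1);
}

static u_long
example_sum(void)
{
        u_long total;
        int cpu;

        total = 0;
        /* CPU_FOREACH() skips IDs absent from the (possibly sparse) map. */
        CPU_FOREACH(cpu)
                total += DPCPU_ID_GET(cpu, example_counter);
        return (total);
}
#endif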

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <vm/uma.h>
#include <ddb/ddb.h>

static MALLOC_DEFINE(M_PCPU, "Per-cpu", "Per-cpu resource accounting.");

struct dpcpu_free {
        uintptr_t df_start;
        int df_len;
        TAILQ_ENTRY(dpcpu_free) df_link;
};

DPCPU_DEFINE_STATIC(char, modspace[DPCPU_MODMIN] __aligned(__alignof(void *)));
static TAILQ_HEAD(, dpcpu_free) dpcpu_head = TAILQ_HEAD_INITIALIZER(dpcpu_head);
static struct sx dpcpu_lock;
uintptr_t dpcpu_off[MAXCPU];
struct pcpu *cpuid_to_pcpu[MAXCPU];
struct cpuhead cpuhead = STAILQ_HEAD_INITIALIZER(cpuhead);

/*
 * Initialize the MI portions of a struct pcpu.
 */
void
pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

        bzero(pcpu, size);
        KASSERT(cpuid >= 0 && cpuid < MAXCPU,
            ("pcpu_init: invalid cpuid %d", cpuid));
        pcpu->pc_cpuid = cpuid;
        cpuid_to_pcpu[cpuid] = pcpu;
        STAILQ_INSERT_TAIL(&cpuhead, pcpu, pc_allcpu);
        cpu_pcpu_init(pcpu, cpuid, size);
        pcpu->pc_rm_queue.rmq_next = &pcpu->pc_rm_queue;
        pcpu->pc_rm_queue.rmq_prev = &pcpu->pc_rm_queue;
        pcpu->pc_zpcpu_offset = zpcpu_offset_cpu(cpuid);
}
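
/*
 * Illustrative sketch (not part of this file): once pcpu_init() has run,
 * the current CPU's struct pcpu fields are normally read through the
 * PCPU_GET()/PCPU_PTR() accessors from <sys/pcpu.h>, while another CPU's
 * structure can be looked up by logical ID with pcpu_find().  The function
 * name below is hypothetical.
 */
#if 0	/* example only */
static void
example_report(void)
{
        struct pcpu *pc;

        printf("running on CPU %d\n", PCPU_GET(cpuid));
        pc = pcpu_find(0);              /* MI lookup by logical CPU ID */
        if (pc != NULL)
                printf("CPU0 curthread %p\n", pc->pc_curthread);
}
#endif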

void
dpcpu_init(void *dpcpu, int cpuid)
{
        struct pcpu *pcpu;

        pcpu = pcpu_find(cpuid);
        pcpu->pc_dynamic = (uintptr_t)dpcpu - DPCPU_START;

        /*
         * Initialize defaults from our linker section.
         */
        memcpy(dpcpu, (void *)DPCPU_START, DPCPU_BYTES);

        /*
         * Place it in the global pcpu offset array.
         */
        dpcpu_off[cpuid] = pcpu->pc_dynamic;
}
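
/*
 * Illustrative sketch (not part of this file): pc_dynamic (mirrored in
 * dpcpu_off[]) is the bias added to a variable's linker-set address to
 * reach a given CPU's private copy, which is essentially what the
 * DPCPU_ID_PTR() macro expands to.  "example_var" is hypothetical.
 */
#if 0	/* example only */
DPCPU_DEFINE_STATIC(int, example_var);

static int *
example_ptr(int cpuid)
{

        /* Equivalent, by hand, to DPCPU_ID_PTR(cpuid, example_var). */
        return ((int *)(dpcpu_off[cpuid] + (uintptr_t)&DPCPU_NAME(example_var)));
}
#endif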

static void
dpcpu_startup(void *dummy __unused)
{
        struct dpcpu_free *df;

        df = malloc(sizeof(*df), M_PCPU, M_WAITOK | M_ZERO);
        df->df_start = (uintptr_t)&DPCPU_NAME(modspace);
        df->df_len = DPCPU_MODMIN;
        TAILQ_INSERT_HEAD(&dpcpu_head, df, df_link);
        sx_init(&dpcpu_lock, "dpcpu alloc lock");
}
SYSINIT(dpcpu, SI_SUB_KLD, SI_ORDER_FIRST, dpcpu_startup, NULL);

/*
 * UMA_ZONE_PCPU zones for general kernel use.
 */
uma_zone_t pcpu_zone_4;
uma_zone_t pcpu_zone_8;
uma_zone_t pcpu_zone_16;
uma_zone_t pcpu_zone_32;
uma_zone_t pcpu_zone_64;

static void
pcpu_zones_startup(void)
{

        pcpu_zone_4 = uma_zcreate("pcpu-4", 4,
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
        pcpu_zone_8 = uma_zcreate("pcpu-8", 8,
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
        pcpu_zone_16 = uma_zcreate("pcpu-16", 16,
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
        pcpu_zone_32 = uma_zcreate("pcpu-32", 32,
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
        pcpu_zone_64 = uma_zcreate("pcpu-64", 64,
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
}
SYSINIT(pcpu_zones, SI_SUB_COUNTER, SI_ORDER_FIRST, pcpu_zones_startup, NULL);
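
/*
 * Illustrative sketch (not part of this file): these zones back small
 * per-CPU allocations made at run time, e.g. lightweight counters.  The
 * pointer returned by uma_zalloc_pcpu() is a base from which per-CPU views
 * are derived with the zpcpu_*() accessors in <sys/pcpu.h>.  The names
 * below are hypothetical.
 */
#if 0	/* example only */
static uint64_t *example_pcpu_ctr;

static void
example_ctr_init(void)
{

        example_pcpu_ctr = uma_zalloc_pcpu(pcpu_zone_8, M_WAITOK | M_ZERO);
}

static uint64_t
example_ctr_read(void)
{
        uint64_t sum;
        int cpu;

        sum = 0;
        CPU_FOREACH(cpu)
                sum += *zpcpu_get_cpu(example_pcpu_ctr, cpu);
        return (sum);
}
#endif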

/*
 * First-fit extent based allocator for allocating space in the per-cpu
 * region reserved for modules.  This is only intended for use by the
 * kernel linkers to place module linker sets.
 */
void *
dpcpu_alloc(int size)
{
        struct dpcpu_free *df;
        void *s;

        s = NULL;
        size = roundup2(size, sizeof(void *));
        sx_xlock(&dpcpu_lock);
        TAILQ_FOREACH(df, &dpcpu_head, df_link) {
                if (df->df_len < size)
                        continue;
                if (df->df_len == size) {
                        s = (void *)df->df_start;
                        TAILQ_REMOVE(&dpcpu_head, df, df_link);
                        free(df, M_PCPU);
                        break;
                }
                s = (void *)df->df_start;
                df->df_len -= size;
                df->df_start = df->df_start + size;
                break;
        }
        sx_xunlock(&dpcpu_lock);

        return (s);
}
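
/*
 * Illustrative sketch (not part of this file): the kernel linkers use this
 * allocator roughly as follows when loading a module that carries a
 * per-cpu linker set of pcpu_size bytes.  The function and parameter names
 * are hypothetical.
 */
#if 0	/* example only */
static int
example_load_pcpu(void *pcpu_section, int pcpu_size)
{
        void *base;

        base = dpcpu_alloc(pcpu_size);
        if (base == NULL)
                return (ENOSPC);
        /* Seed the master copy, then replicate it to every CPU. */
        memcpy(base, pcpu_section, pcpu_size);
        dpcpu_copy(base, pcpu_size);
        return (0);
}
#endif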

/*
 * Free dynamic per-cpu space at module unload time.
 */
void
dpcpu_free(void *s, int size)
{
        struct dpcpu_free *df;
        struct dpcpu_free *dn;
        uintptr_t start;
        uintptr_t end;

        size = roundup2(size, sizeof(void *));
        start = (uintptr_t)s;
        end = start + size;
        /*
         * Free a region of space and merge it with as many neighbors as
         * possible.  Keeping the list sorted simplifies this operation.
         */
        sx_xlock(&dpcpu_lock);
        TAILQ_FOREACH(df, &dpcpu_head, df_link) {
                if (df->df_start > end)
                        break;
                /*
                 * If we expand at the end of an entry we may have to
                 * merge it with the one following it as well.
                 */
                if (df->df_start + df->df_len == start) {
                        df->df_len += size;
                        dn = TAILQ_NEXT(df, df_link);
                        if (df->df_start + df->df_len == dn->df_start) {
                                df->df_len += dn->df_len;
                                TAILQ_REMOVE(&dpcpu_head, dn, df_link);
                                free(dn, M_PCPU);
                        }
                        sx_xunlock(&dpcpu_lock);
                        return;
                }
                if (df->df_start == end) {
                        df->df_start = start;
                        df->df_len += size;
                        sx_xunlock(&dpcpu_lock);
                        return;
                }
        }
        dn = malloc(sizeof(*df), M_PCPU, M_WAITOK | M_ZERO);
        dn->df_start = start;
        dn->df_len = size;
        if (df)
                TAILQ_INSERT_BEFORE(df, dn, df_link);
        else
                TAILQ_INSERT_TAIL(&dpcpu_head, dn, df_link);
        sx_xunlock(&dpcpu_lock);
}

/*
 * Initialize the per-cpu storage from an updated linker-set region.
 */
void
dpcpu_copy(void *s, int size)
{
#ifdef SMP
        uintptr_t dpcpu;
        int i;

        CPU_FOREACH(i) {
                dpcpu = dpcpu_off[i];
                if (dpcpu == 0)
                        continue;
                memcpy((void *)(dpcpu + (uintptr_t)s), s, size);
        }
#else
        memcpy((void *)(dpcpu_off[0] + (uintptr_t)s), s, size);
#endif
}

/*
 * Destroy a struct pcpu.
 */
void
pcpu_destroy(struct pcpu *pcpu)
{

        STAILQ_REMOVE(&cpuhead, pcpu, pcpu, pc_allcpu);
        cpuid_to_pcpu[pcpu->pc_cpuid] = NULL;
        dpcpu_off[pcpu->pc_cpuid] = 0;
}

/*
 * Locate a struct pcpu by cpu id.
 */
struct pcpu *
pcpu_find(u_int cpuid)
{

        return (cpuid_to_pcpu[cpuid]);
}

int
sysctl_dpcpu_quad(SYSCTL_HANDLER_ARGS)
{
        uintptr_t dpcpu;
        int64_t count;
        int i;

        count = 0;
        CPU_FOREACH(i) {
                dpcpu = dpcpu_off[i];
                if (dpcpu == 0)
                        continue;
                count += *(int64_t *)(dpcpu + (uintptr_t)arg1);
        }
        return (SYSCTL_OUT(req, &count, sizeof(count)));
}

int
sysctl_dpcpu_long(SYSCTL_HANDLER_ARGS)
{
        uintptr_t dpcpu;
        long count;
        int i;

        count = 0;
        CPU_FOREACH(i) {
                dpcpu = dpcpu_off[i];
                if (dpcpu == 0)
                        continue;
                count += *(long *)(dpcpu + (uintptr_t)arg1);
        }
        return (SYSCTL_OUT(req, &count, sizeof(count)));
}

int
sysctl_dpcpu_int(SYSCTL_HANDLER_ARGS)
{
        uintptr_t dpcpu;
        int count;
        int i;

        count = 0;
        CPU_FOREACH(i) {
                dpcpu = dpcpu_off[i];
                if (dpcpu == 0)
                        continue;
                count += *(int *)(dpcpu + (uintptr_t)arg1);
        }
        return (SYSCTL_OUT(req, &count, sizeof(count)));
}
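
/*
 * Illustrative sketch (not part of this file): the handlers above report
 * the sum of a DPCPU variable across all CPUs.  A hypothetical read-only
 * statistic could be exported like this, passing the variable's linker-set
 * address as arg1.
 */
#if 0	/* example only */
DPCPU_DEFINE_STATIC(long, example_stat);

SYSCTL_PROC(_debug, OID_AUTO, example_stat,
    CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE, &DPCPU_NAME(example_stat), 0,
    sysctl_dpcpu_long, "L", "Sum of per-CPU example_stat across all CPUs");
#endif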

#ifdef DDB
DB_SHOW_COMMAND(dpcpu_off, db_show_dpcpu_off)
{
        int id;

        CPU_FOREACH(id) {
                db_printf("dpcpu_off[%2d] = 0x%jx (+ DPCPU_START = %p)\n",
                    id, (uintmax_t)dpcpu_off[id],
                    (void *)(uintptr_t)(dpcpu_off[id] + DPCPU_START));
        }
}

static void
show_pcpu(struct pcpu *pc)
{
        struct thread *td;

        db_printf("cpuid = %d\n", pc->pc_cpuid);
        db_printf("dynamic pcpu = %p\n", (void *)pc->pc_dynamic);
        db_printf("curthread = ");
        td = pc->pc_curthread;
        if (td != NULL)
                db_printf("%p: pid %d tid %d critnest %d \"%s\"\n", td,
                    td->td_proc->p_pid, td->td_tid, td->td_critnest,
                    td->td_name);
        else
                db_printf("none\n");
        db_printf("curpcb = %p\n", pc->pc_curpcb);
        db_printf("fpcurthread = ");
        td = pc->pc_fpcurthread;
        if (td != NULL)
                db_printf("%p: pid %d \"%s\"\n", td, td->td_proc->p_pid,
                    td->td_name);
        else
                db_printf("none\n");
        db_printf("idlethread = ");
        td = pc->pc_idlethread;
        if (td != NULL)
                db_printf("%p: tid %d \"%s\"\n", td, td->td_tid, td->td_name);
        else
                db_printf("none\n");
        db_show_mdpcpu(pc);

#ifdef VIMAGE
        db_printf("curvnet = %p\n", pc->pc_curthread->td_vnet);
#endif

#ifdef WITNESS
        db_printf("spin locks held:\n");
        witness_list_locks(&pc->pc_spinlocks, db_printf);
#endif
}

DB_SHOW_COMMAND(pcpu, db_show_pcpu)
{
        struct pcpu *pc;
        int id;

        if (have_addr)
                id = ((addr >> 4) % 16) * 10 + (addr % 16);
        else
                id = PCPU_GET(cpuid);
        pc = pcpu_find(id);
        if (pc == NULL) {
                db_printf("CPU %d not found\n", id);
                return;
        }
        show_pcpu(pc);
}

DB_SHOW_ALL_COMMAND(pcpu, db_show_cpu_all)
{
        struct pcpu *pc;
        int id;

        db_printf("Current CPU: %d\n\n", PCPU_GET(cpuid));
        CPU_FOREACH(id) {
                pc = pcpu_find(id);
                if (pc != NULL) {
                        show_pcpu(pc);
                        db_printf("\n");
                }
        }
}
DB_SHOW_ALIAS(allpcpu, db_show_cpu_all);
#endif
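
/*
 * Illustrative DDB usage (not part of this file): at the in-kernel
 * debugger prompt, "show dpcpu_off" dumps the per-CPU offset table,
 * "show pcpu" (optionally followed by a CPU ID) prints one CPU's state,
 * and "show all pcpu" or its alias "show allpcpu" iterates over every CPU.
 */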