1 /* $OpenBSD: gdt.c,v 1.27 2007/07/02 17:11:29 thib Exp $ */
2 /* $NetBSD: gdt.c,v 1.28 2002/12/14 09:38:50 junyoung Exp $ */
3
4 /*-
5 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by John T. Kohl and Charles M. Hannum.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * The GDT handling has two phases. During the early lifetime of the
42 * kernel there is a static gdt which will be stored in bootstrap_gdt.
43 * Later, when the virtual memory is initialized, this will be
 * replaced with a dynamically resizable GDT (although we will only
 * ever grow it; there is almost no gain at all in compacting it,
 * and it has proven to be a complicated thing to do, considering
 * parallel access, so it is just not worth the effort).
48 *
49 * The static GDT area will hold the initial requirement of NGDT descriptors.
50 * The dynamic GDT will have a statically sized virtual memory area of size
51 * GDTMAXPAGES, the physical area backing this will be allocated as needed
52 * starting with the size needed for holding a copy of the bootstrap gdt.
53 *
 * Every CPU in a system has its own copy of the GDT.  The only real
 * difference between the copies is currently a cpu-specific segment holding
 * the struct cpu_info of that processor, for simplicity in getting cpu_info
 * fields from assembly.  The boot processor will actually refer to the
 * global copy of the GDT as pointed to by the gdt variable.
59 */
60
61 #include <sys/cdefs.h>
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/proc.h>
66 #include <sys/lock.h>
67 #include <sys/user.h>
68 #include <sys/rwlock.h>
69
70 #include <uvm/uvm.h>
71
72 #include <machine/gdt.h>
73
/*
 * Static bootstrap GDT, used until gdt_init() installs the dynamically
 * sized table; `gdt' always points at the current (global) table.
 */
union descriptor bootstrap_gdt[NGDT];
union descriptor *gdt = bootstrap_gdt;

int gdt_size;		/* total number of GDT entries */
int gdt_next;		/* next available slot for sweeping */
int gdt_free;		/* next free slot; terminated with GNULL_SEL */

/* Serializes gdt_get_slot()/gdt_put_slot(); see gdt_lock() below. */
struct rwlock gdt_lock_store = RWLOCK_INITIALIZER("gdtlk");

void gdt_grow(void);
int gdt_get_slot(void);
void gdt_put_slot(int);
86
/*
 * Lock and unlock the GDT, to avoid races in case gdt_{ge,pu}t_slot() sleep
 * waiting for memory.
 *
 * NOTE(review): the lock is skipped while curproc is NULL (early boot,
 * before processes exist); presumably nothing can race at that point and
 * the sleeping rwlock could not be taken anyway — confirm.
 */
#define gdt_lock() \
do { \
	if (curproc != NULL) \
		rw_enter_write(&gdt_lock_store);\
} while (0)

#define gdt_unlock() \
do { \
	if (curproc != NULL) \
		rw_exit_write(&gdt_lock_store); \
} while (0)
102
103 /* XXX needs spinlocking if we ever mean to go finegrained. */
104 void
105 setgdt(int sel, void *base, size_t limit, int type, int dpl, int def32,
106 int gran)
107 {
108 struct segment_descriptor *sd = &gdt[sel].sd;
109 CPU_INFO_ITERATOR cii;
110 struct cpu_info *ci;
111
112 KASSERT(sel < gdt_size);
113
114 setsegment(sd, base, limit, type, dpl, def32, gran);
115 CPU_INFO_FOREACH(cii, ci)
116 if (ci->ci_gdt != NULL && ci->ci_gdt != gdt)
117 ci->ci_gdt[sel].sd = *sd;
118 }
119
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 *
 * Switches from the static bootstrap_gdt to the dynamically sized GDT:
 * reserve kernel virtual space for MAXGDTSIZ descriptors, back only the
 * first MINGDTSIZ of them with zeroed physical pages, copy the bootstrap
 * descriptors in, and point the boot processor at the new table.
 */
void
gdt_init()
{
	size_t max_len, min_len;
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	max_len = MAXGDTSIZ * sizeof(union descriptor);
	min_len = MINGDTSIZ * sizeof(union descriptor);

	gdt_size = MINGDTSIZ;
	gdt_next = NGDT;	/* sweeping starts past the bootstrap slots */
	gdt_free = GNULL_SEL;	/* free list is empty */

	/*
	 * NOTE(review): the uvm_km_valloc() return value is not checked;
	 * presumably a failure this early in boot is unrecoverable — confirm.
	 */
	gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
	for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len; va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("gdt_init: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	bcopy(bootstrap_gdt, gdt, NGDT * sizeof(union descriptor));
	ci->ci_gdt = gdt;
	/* Per-cpu segment so assembly can locate this cpu's cpu_info. */
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);

	gdt_init_cpu(ci);
}
153
#ifdef MULTIPROCESSOR
/*
 * Allocate shadow GDT for a slave cpu.
 *
 * Reserves the full MAXGDTSIZ virtual area, wires the initial MINGDTSIZ
 * descriptors, copies the current global GDT contents, and installs this
 * cpu's private cpu_info segment.
 */
void
gdt_alloc_cpu(struct cpu_info *ci)
{
	int max_len = MAXGDTSIZ * sizeof(union descriptor);
	int min_len = MINGDTSIZ * sizeof(union descriptor);

	ci->ci_gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
	/* Wire only the initial pages; gdt_grow() maps more as needed. */
	uvm_map_pageable(kernel_map, (vaddr_t)ci->ci_gdt,
	    (vaddr_t)ci->ci_gdt + min_len, FALSE, FALSE);
	bzero(ci->ci_gdt, min_len);
	/*
	 * NOTE(review): copies gdt_size entries while only min_len bytes
	 * are wired here; this appears to assume the GDT has not grown
	 * beyond MINGDTSIZ when a slave cpu is attached — confirm.
	 */
	bcopy(gdt, ci->ci_gdt, gdt_size * sizeof(union descriptor));
	/* Per-cpu segment so assembly can locate this cpu's cpu_info. */
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);
}
#endif /* MULTIPROCESSOR */
173
174
175 /*
176 * Load appropriate gdt descriptor; we better be running on *ci
177 * (for the most part, this is how a cpu knows who it is).
178 */
179 void
180 gdt_init_cpu(struct cpu_info *ci)
181 {
182 struct region_descriptor region;
183
184 setregion(®ion, ci->ci_gdt,
185 MAXGDTSIZ * sizeof(union descriptor) - 1);
186 lgdt(®ion);
187 }
188
189 /*
190 * Grow the GDT.
191 */
192 void
193 gdt_grow()
194 {
195 size_t old_len, new_len;
196 CPU_INFO_ITERATOR cii;
197 struct cpu_info *ci;
198 struct vm_page *pg;
199 vaddr_t va;
200
201 old_len = gdt_size * sizeof(union descriptor);
202 gdt_size <<= 1;
203 new_len = old_len << 1;
204
205 CPU_INFO_FOREACH(cii, ci) {
206 for (va = (vaddr_t)(ci->ci_gdt) + old_len;
207 va < (vaddr_t)(ci->ci_gdt) + new_len;
208 va += PAGE_SIZE) {
209 while (
210 (pg =
211 uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO)) ==
212 NULL) {
213 uvm_wait("gdt_grow");
214 }
215 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
216 VM_PROT_READ | VM_PROT_WRITE);
217 }
218 }
219 }
220
221 /*
222 * Allocate a GDT slot as follows:
223 * 1) If there are entries on the free list, use those.
224 * 2) If there are fewer than gdt_size entries in use, there are free slots
225 * near the end that we can sweep through.
226 * 3) As a last resort, we increase the size of the GDT, and sweep through
227 * the new slots.
228 */
229 int
230 gdt_get_slot()
231 {
232 int slot;
233
234 gdt_lock();
235
236 if (gdt_free != GNULL_SEL) {
237 slot = gdt_free;
238 gdt_free = gdt[slot].gd.gd_selector;
239 } else {
240 if (gdt_next >= gdt_size) {
241 if (gdt_size >= MAXGDTSIZ)
242 panic("gdt_get_slot: out of GDT descriptors");
243 gdt_grow();
244 }
245 slot = gdt_next++;
246 }
247
248 gdt_unlock();
249 return (slot);
250 }
251
252 /*
253 * Deallocate a GDT slot, putting it on the free list.
254 */
255 void
256 gdt_put_slot(int slot)
257 {
258
259 gdt_lock();
260
261 gdt[slot].gd.gd_type = SDT_SYSNULL;
262 gdt[slot].gd.gd_selector = gdt_free;
263 gdt_free = slot;
264
265 gdt_unlock();
266 }
267
268 int
269 tss_alloc(struct pcb *pcb)
270 {
271 int slot;
272
273 slot = gdt_get_slot();
274 setgdt(slot, &pcb->pcb_tss, sizeof(struct pcb) - 1,
275 SDT_SYS386TSS, SEL_KPL, 0, 0);
276 return GSEL(slot, SEL_KPL);
277 }
278
/*
 * Release the GDT slot backing the given TSS selector.
 */
void
tss_free(int sel)
{
	int slot = IDXSEL(sel);

	gdt_put_slot(slot);
}
285
286 #ifdef USER_LDT
287 /*
288 * Caller must have pmap locked for both of these functions.
289 */
290 void
291 ldt_alloc(struct pmap *pmap, union descriptor *ldt, size_t len)
292 {
293 int slot;
294
295 slot = gdt_get_slot();
296 setgdt(slot, ldt, len - 1, SDT_SYSLDT, SEL_KPL, 0, 0);
297 pmap->pm_ldt_sel = GSEL(slot, SEL_KPL);
298 }
299
300 void
301 ldt_free(struct pmap *pmap)
302 {
303 int slot;
304
305 slot = IDXSEL(pmap->pm_ldt_sel);
306
307 gdt_put_slot(slot);
308 }
309 #endif /* USER_LDT */