1 | /* $NetBSD: cpu.h,v 1.67 2015/12/13 15:02:19 maxv Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c) 1990 The Regents of the University of California. |
5 | * All rights reserved. |
6 | * |
7 | * This code is derived from software contributed to Berkeley by |
8 | * William Jolitz. |
9 | * |
10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions |
12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. |
18 | * 3. Neither the name of the University nor the names of its contributors |
19 | * may be used to endorse or promote products derived from this software |
20 | * without specific prior written permission. |
21 | * |
22 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
23 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
25 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
27 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
28 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
29 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
30 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
31 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
32 | * SUCH DAMAGE. |
33 | * |
34 | * @(#)cpu.h 5.4 (Berkeley) 5/9/91 |
35 | */ |
36 | |
37 | #ifndef _X86_CPU_H_ |
38 | #define _X86_CPU_H_ |
39 | |
40 | #if defined(_KERNEL) || defined(_STANDALONE) |
41 | #include <sys/types.h> |
42 | #else |
43 | #include <stdint.h> |
44 | #include <stdbool.h> |
45 | #endif /* _KERNEL || _STANDALONE */ |
46 | |
47 | #if defined(_KERNEL) || defined(_KMEMUSER) |
48 | #if defined(_KERNEL_OPT) |
49 | #include "opt_xen.h" |
50 | #ifdef i386 |
51 | #include "opt_user_ldt.h" |
52 | #include "opt_vm86.h" |
53 | #endif |
54 | #endif |
55 | |
56 | /* |
57 | * Definitions unique to x86 cpu support. |
58 | */ |
59 | #include <machine/frame.h> |
60 | #include <machine/pte.h> |
61 | #include <machine/segments.h> |
62 | #include <machine/tss.h> |
63 | #include <machine/intrdefs.h> |
64 | |
65 | #include <x86/cacheinfo.h> |
66 | |
67 | #include <sys/cpu_data.h> |
68 | #include <sys/evcnt.h> |
69 | #include <sys/device_if.h> /* for device_t */ |
70 | |
71 | #ifdef XEN |
72 | #include <xen/xen-public/xen.h> |
73 | #include <xen/xen-public/event_channel.h> |
74 | #include <sys/mutex.h> |
75 | #endif /* XEN */ |
76 | |
77 | struct intrsource; |
78 | struct pmap; |
79 | |
80 | #ifdef __x86_64__ |
81 | #define i386tss x86_64_tss |
82 | #endif |
83 | |
84 | #define NIOPORTS 1024 /* # of ports we allow to be mapped */ |
85 | #define IOMAPSIZE (NIOPORTS / 8) /* I/O bitmap size in bytes */ |
86 | |
87 | /* |
88 | * a bunch of this belongs in cpuvar.h; move it later.. |
89 | */ |
90 | |
/*
 * Per-CPU state. Exposed under _KMEMUSER as well as _KERNEL (see the
 * guard above), so the field layout is visible outside the kernel;
 * NOTE(review): treat the layout as ABI and confirm before reordering.
 */
struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */
	device_t ci_dev;		/* pointer to our device */
	struct cpu_info *ci_self;	/* self-pointer */
	volatile struct vcpu_info *ci_vcpu; /* for XEN */
	void	*ci_tlog_base;		/* Trap log base */
	int32_t ci_tlog_offset;		/* Trap log current offset */

	/*
	 * Will be accessed by other CPUs.
	 */
	struct cpu_info *ci_next;	/* next cpu */
	struct lwp *ci_curlwp;		/* current owner of the processor */
	struct lwp *ci_fpcurlwp;	/* current owner of the FPU */
	int _unused1[2];		/* NOTE(review): padding, presumably kept for layout compat */
	cpuid_t ci_cpuid;		/* our CPU ID */
	int _unused;			/* NOTE(review): padding, presumably kept for layout compat */
	uint32_t ci_acpiid;		/* our ACPI/MADT ID */
	uint32_t ci_initapicid;		/* our initial APIC ID */

	/*
	 * Private members.
	 */
	struct evcnt ci_tlb_evcnt;	/* tlb shootdown counter */
	struct pmap *ci_pmap;		/* current pmap */
	int ci_need_tlbwait;		/* need to wait for TLB invalidations */
	int ci_want_pmapload;		/* pmap_load() is needed */
	volatile int ci_tlbstate;	/* one of TLBSTATE_ states. see below */
#define TLBSTATE_VALID	0	/* all user tlbs are valid */
#define TLBSTATE_LAZY	1	/* tlbs are valid but won't be kept uptodate */
#define TLBSTATE_STALE	2	/* we might have stale user tlbs */
	int ci_curldt;		/* current LDT descriptor */
	int ci_nintrhand;	/* number of H/W interrupt handlers */
	uint64_t ci_scratch;	/* scratch word; presumably touched from assembly — TODO confirm */
	uintptr_t ci_pmap_data[128 / sizeof(uintptr_t)]; /* opaque storage reserved for the pmap layer */

#ifdef XEN
	struct iplsource *ci_isources[NIPL];	/* interrupt sources, one slot per IPL */
	u_long ci_evtmask[NR_EVENT_CHANNELS];	/* events allowed on this CPU */
#else
	struct intrsource *ci_isources[MAX_INTR_SOURCES]; /* H/W interrupt sources */
#endif
	volatile int ci_mtx_count;	/* Negative count of spin mutexes */
	volatile int ci_mtx_oldspl;	/* Old SPL at this ci_idepth */

	/* The following must be aligned for cmpxchg8b. */
	struct {
		uint32_t ipending;	/* pending interrupt bits (ci_ipending) */
		int ilevel;		/* current interrupt level (ci_ilevel) */
	} ci_istate __aligned(8);
#define ci_ipending	ci_istate.ipending
#define ci_ilevel	ci_istate.ilevel

	int ci_idepth;			/* interrupt nesting depth (see ci_mtx_oldspl) */
	void * ci_intrstack;		/* interrupt stack — exact use not visible here */
	uint32_t ci_imask[NIPL];	/* per-IPL interrupt mask — TODO confirm semantics */
	uint32_t ci_iunmask[NIPL];	/* per-IPL interrupt unmask — TODO confirm semantics */

	uint32_t ci_flags;		/* flags; see below */
	uint32_t ci_ipis;		/* interprocessor interrupts pending */
	uint32_t sc_apic_version;	/* local APIC version */
					/* NOTE(review): "sc_" prefix is
					 * inconsistent with the ci_* naming;
					 * renaming would break external users. */

	uint32_t ci_signature;		/* X86 cpuid type (cpuid.1.%eax) */
	uint32_t ci_vendor[4];		/* vendor string */
	uint32_t _unused2;		/* NOTE(review): padding for layout compat */
	uint32_t ci_max_cpuid;		/* cpuid.0:%eax */
	uint32_t ci_max_ext_cpuid;	/* cpuid.80000000:%eax */
	volatile uint32_t ci_lapic_counter; /* NOTE(review): LAPIC-related counter; exact use not visible here */

	uint32_t ci_feat_val[7];	/* X86 CPUID feature bits */
			/* [0] basic features cpuid.1:%edx
			 * [1] basic features cpuid.1:%ecx (CPUID2_xxx bits)
			 * [2] extended features cpuid:80000001:%edx
			 * [3] extended features cpuid:80000001:%ecx
			 * [4] VIA padlock features
			 * [5] structured extended features cpuid.7:%ebx
			 * [6] structured extended features cpuid.7:%ecx
			 */

	const struct cpu_functions *ci_func;	/* start/stop functions */
	struct trapframe *ci_ddb_regs;		/* trapframe, presumably for DDB — confirm */

	u_int ci_cflush_lsize;	/* CFLUSH insn line size */
	struct x86_cache_info ci_cinfo[CAI_COUNT];	/* cache/TLB descriptions */

	union descriptor *ci_gdt;	/* this CPU's GDT — presumably per-CPU copy */

#ifdef i386
	struct i386tss ci_doubleflt_tss;	/* TSS used for double faults */
	struct i386tss ci_ddbipi_tss;		/* TSS used for the DDB IPI */
#endif

#ifdef PAE
	uint32_t ci_pae_l3_pdirpa;	/* PA of L3 PD */
	pd_entry_t * ci_pae_l3_pdir;	/* VA pointer to L3 PD */
#endif

#if defined(XEN) && (defined(PAE) || defined(__x86_64__))
	/* Currently active user PGD (can't use rcr3() with Xen) */
	pd_entry_t * ci_kpm_pdir;	/* per-cpu PMD (va) */
	paddr_t ci_kpm_pdirpa;		/* per-cpu PMD (pa) */
	kmutex_t ci_kpm_mtx;		/* presumably serializes updates of the above — confirm */
#if defined(__x86_64__)
	/* per-cpu version of normal_pdes */
	pd_entry_t * ci_normal_pdes[3];	/* Ok to hardcode. only for x86_64 && XEN */
	paddr_t ci_xen_current_user_pgd;
#endif /* __x86_64__ */
#endif /* XEN et.al */

	char *ci_doubleflt_stack;	/* stack for double fault handling */
	char *ci_ddbipi_stack;		/* stack for the DDB IPI */

#ifndef XEN
	struct evcnt ci_ipi_events[X86_NIPI];	/* per-IPI-type event counters */
#else /* XEN */
	struct evcnt ci_ipi_events[XEN_NIPIS];	/* per-IPI-type event counters */
	evtchn_port_t ci_ipi_evtchn;		/* event channel used to deliver IPIs */
#endif /* XEN */

	device_t ci_frequency;	/* Frequency scaling technology */
	device_t ci_padlock;	/* VIA PadLock private storage */
	device_t ci_temperature; /* Intel coretemp(4) or equivalent */
	device_t ci_vm;		/* Virtual machine guest driver */

	struct i386tss ci_tss;	/* Per-cpu TSS; shared among LWPs */
	char ci_iomap[IOMAPSIZE]; /* I/O Bitmap */
	int ci_tss_sel;		/* TSS selector of this cpu */

	/*
	 * The following two are actually region_descriptors,
	 * but that would pollute the namespace.
	 */
	uintptr_t	ci_suspend_gdt;
	uint16_t	ci_suspend_gdt_padding;
	uintptr_t	ci_suspend_idt;
	uint16_t	ci_suspend_idt_padding;

	/* CPU register state saved at suspend — presumably restored at resume */
	uint16_t	ci_suspend_tr;
	uint16_t	ci_suspend_ldt;
	uintptr_t	ci_suspend_fs;
	uintptr_t	ci_suspend_gs;
	uintptr_t	ci_suspend_kgs;
	uintptr_t	ci_suspend_efer;
	uintptr_t	ci_suspend_reg[12];
	uintptr_t	ci_suspend_cr0;
	uintptr_t	ci_suspend_cr2;
	uintptr_t	ci_suspend_cr3;
	uintptr_t	ci_suspend_cr4;
	uintptr_t	ci_suspend_cr8;

	/* The following must be in a single cache line. */
	int	ci_want_resched __aligned(64);
	int	ci_padout __aligned(64);
};
245 | |
246 | /* |
247 | * Macros to handle (some) trapframe registers for common x86 code. |
248 | */ |
#ifdef __x86_64__
/*
 * amd64 trapframe accessors. The argument is fully parenthesized so the
 * macros are safe with any pointer-valued expression; each expands to an
 * lvalue, so assignment through them still works.
 */
#define X86_TF_RAX(tf)		((tf)->tf_rax)
#define X86_TF_RDX(tf)		((tf)->tf_rdx)
#define X86_TF_RSP(tf)		((tf)->tf_rsp)
#define X86_TF_RIP(tf)		((tf)->tf_rip)
#define X86_TF_RFLAGS(tf)	((tf)->tf_rflags)
#else
/* i386 equivalents of the accessors above. */
#define X86_TF_RAX(tf)		((tf)->tf_eax)
#define X86_TF_RDX(tf)		((tf)->tf_edx)
#define X86_TF_RSP(tf)		((tf)->tf_esp)
#define X86_TF_RIP(tf)		((tf)->tf_eip)
#define X86_TF_RFLAGS(tf)	((tf)->tf_eflags)
#endif
262 | |
263 | /* |
264 | * Processor flag notes: The "primary" CPU has certain MI-defined |
265 | * roles (mostly relating to hardclock handling); we distinguish |
266 | * betwen the processor which booted us, and the processor currently |
267 | * holding the "primary" role just to give us the flexibility later to |
268 | * change primaries should we be sufficiently twisted. |
269 | */ |
270 | |
271 | #define CPUF_BSP 0x0001 /* CPU is the original BSP */ |
272 | #define CPUF_AP 0x0002 /* CPU is an AP */ |
273 | #define CPUF_SP 0x0004 /* CPU is only processor */ |
274 | #define CPUF_PRIMARY 0x0008 /* CPU is active primary processor */ |
275 | |
276 | #define CPUF_SYNCTSC 0x0800 /* Synchronize TSC */ |
277 | #define CPUF_PRESENT 0x1000 /* CPU is present */ |
278 | #define CPUF_RUNNING 0x2000 /* CPU is running */ |
279 | #define CPUF_PAUSE 0x4000 /* CPU is paused in DDB */ |
280 | #define CPUF_GO 0x8000 /* CPU should start running */ |
281 | |
282 | #endif /* _KERNEL || __KMEMUSER */ |
283 | |
284 | #ifdef _KERNEL |
285 | /* |
286 | * We statically allocate the CPU info for the primary CPU (or, |
287 | * the only CPU on uniprocessors), and the primary CPU is the |
288 | * first CPU on the CPU info list. |
289 | */ |
290 | extern struct cpu_info cpu_info_primary; |
291 | extern struct cpu_info *cpu_info_list; |
292 | |
293 | #define CPU_INFO_ITERATOR int __unused |
294 | #define CPU_INFO_FOREACH(cii, ci) ci = cpu_info_list; \ |
295 | ci != NULL; ci = ci->ci_next |
296 | |
297 | #define CPU_STARTUP(_ci, _target) ((_ci)->ci_func->start(_ci, _target)) |
298 | #define CPU_STOP(_ci) ((_ci)->ci_func->stop(_ci)) |
299 | #define CPU_START_CLEANUP(_ci) ((_ci)->ci_func->cleanup(_ci)) |
300 | |
301 | #if !defined(__GNUC__) || defined(_MODULE) |
302 | /* For non-GCC and modules */ |
303 | struct cpu_info *x86_curcpu(void); |
304 | void cpu_set_curpri(int); |
305 | # ifdef __GNUC__ |
306 | lwp_t *x86_curlwp(void) __attribute__ ((const)); |
307 | # else |
308 | lwp_t *x86_curlwp(void); |
309 | # endif |
310 | #endif |
311 | |
312 | #define cpu_number() (cpu_index(curcpu())) |
313 | |
314 | #define CPU_IS_PRIMARY(ci) ((ci)->ci_flags & CPUF_PRIMARY) |
315 | |
316 | #define X86_AST_GENERIC 0x01 |
317 | #define X86_AST_PREEMPT 0x02 |
318 | |
319 | #define aston(l, why) ((l)->l_md.md_astpending |= (why)) |
320 | #define cpu_did_resched(l) ((l)->l_md.md_astpending &= ~X86_AST_PREEMPT) |
321 | |
322 | void cpu_boot_secondary_processors(void); |
323 | void cpu_init_idle_lwps(void); |
324 | void cpu_init_msrs(struct cpu_info *, bool); |
325 | void cpu_load_pmap(struct pmap *, struct pmap *); |
326 | void cpu_broadcast_halt(void); |
327 | void cpu_kick(struct cpu_info *); |
328 | |
329 | #define curcpu() x86_curcpu() |
330 | #define curlwp x86_curlwp() |
331 | #define curpcb ((struct pcb *)lwp_getpcb(curlwp)) |
332 | |
333 | /* |
334 | * Arguments to hardclock, softclock and statclock |
335 | * encapsulate the previous machine state in an opaque |
336 | * clockframe; for now, use generic intrframe. |
337 | */ |
struct clockframe {
	struct intrframe cf_if;	/* the generic interrupt frame; see comment above */
};
341 | |
342 | /* |
343 | * Give a profiling tick to the current process when the user profiling |
344 | * buffer pages are invalid. On the i386, request an ast to send us |
345 | * through trap(), marking the proc as needing a profiling tick. |
346 | */ |
347 | extern void cpu_need_proftick(struct lwp *l); |
348 | |
349 | /* |
350 | * Notify the LWP l that it has a signal pending, process as soon as |
351 | * possible. |
352 | */ |
353 | extern void cpu_signotify(struct lwp *); |
354 | |
355 | /* |
356 | * We need a machine-independent name for this. |
357 | */ |
358 | extern void (*delay_func)(unsigned int); |
359 | struct timeval; |
360 | |
361 | #define DELAY(x) (*delay_func)(x) |
362 | #define delay(x) (*delay_func)(x) |
363 | |
364 | extern int biosbasemem; |
365 | extern int biosextmem; |
366 | extern int cputype; |
367 | extern int cpuid_level; |
368 | extern int cpu_class; |
369 | extern char cpu_brand_string[]; |
370 | extern int use_pae; |
371 | |
372 | #ifdef __i386__ |
373 | extern int i386_fpu_present; |
374 | int npx586bug1(int, int); |
375 | extern int i386_fpu_fdivbug; |
376 | extern int i386_use_fxsave; |
377 | extern int i386_has_sse; |
378 | extern int i386_has_sse2; |
379 | #else |
380 | #define i386_fpu_present 1 |
381 | #define i386_fpu_fdivbug 0 |
382 | #define i386_use_fxsave 1 |
383 | #define i386_has_sse 1 |
384 | #define i386_has_sse2 1 |
385 | #endif |
386 | |
387 | extern int x86_fpu_save; |
388 | #define FPU_SAVE_FSAVE 0 |
389 | #define FPU_SAVE_FXSAVE 1 |
390 | #define FPU_SAVE_XSAVE 2 |
391 | #define FPU_SAVE_XSAVEOPT 3 |
392 | extern unsigned int x86_fpu_save_size; |
393 | extern uint64_t x86_xsave_features; |
394 | |
395 | extern void (*x86_cpu_idle)(void); |
396 | #define cpu_idle() (*x86_cpu_idle)() |
397 | |
/* machdep.c */
/* dumpconf() and cpu_reset() were declared twice; duplicates removed. */
void	dumpconf(void);
void	cpu_reset(void);
/* proc0 TSS/LDT setup; i386 and amd64 each provide their own entry point. */
void	i386_proc0_tss_ldt_init(void);
void	x86_64_proc0_tss_ldt_init(void);
void	x86_64_init_pcb_tss_ldt(struct cpu_info *);
406 | |
407 | /* longrun.c */ |
408 | u_int tmx86_get_longrun_mode(void); |
409 | void tmx86_get_longrun_status(u_int *, u_int *, u_int *); |
410 | void tmx86_init_longrun(void); |
411 | |
412 | /* identcpu.c */ |
413 | void cpu_probe(struct cpu_info *); |
414 | void cpu_identify(struct cpu_info *); |
415 | |
416 | /* cpu_topology.c */ |
417 | void x86_cpu_topology(struct cpu_info *); |
418 | |
419 | /* vm_machdep.c */ |
420 | void cpu_proc_fork(struct proc *, struct proc *); |
421 | |
422 | /* locore.s */ |
423 | struct region_descriptor; |
424 | void lgdt(struct region_descriptor *); |
425 | #ifdef XEN |
426 | void lgdt_finish(void); |
427 | #endif |
428 | |
429 | struct pcb; |
430 | void savectx(struct pcb *); |
431 | void lwp_trampoline(void); |
432 | #ifdef XEN |
433 | void startrtclock(void); |
434 | void xen_delay(unsigned int); |
435 | void xen_initclocks(void); |
436 | void xen_suspendclocks(struct cpu_info *); |
437 | void xen_resumeclocks(struct cpu_info *); |
438 | #else |
439 | /* clock.c */ |
440 | void initrtclock(u_long); |
441 | void startrtclock(void); |
442 | void i8254_delay(unsigned int); |
443 | void i8254_microtime(struct timeval *); |
444 | void i8254_initclocks(void); |
445 | #endif |
446 | |
447 | /* cpu.c */ |
448 | |
449 | void cpu_probe_features(struct cpu_info *); |
450 | |
451 | /* vm_machdep.c */ |
452 | paddr_t kvtop(void *); |
453 | |
454 | #ifdef USER_LDT |
455 | /* sys_machdep.h */ |
456 | int x86_get_ldt(struct lwp *, void *, register_t *); |
457 | int x86_set_ldt(struct lwp *, void *, register_t *); |
458 | #endif |
459 | |
460 | /* isa_machdep.c */ |
461 | void isa_defaultirq(void); |
462 | int isa_nmi(void); |
463 | |
464 | #ifdef VM86 |
465 | /* vm86.c */ |
466 | void vm86_gpfault(struct lwp *, int); |
467 | #endif /* VM86 */ |
468 | |
469 | /* consinit.c */ |
470 | void kgdb_port_init(void); |
471 | |
472 | /* bus_machdep.c */ |
473 | void x86_bus_space_init(void); |
474 | void x86_bus_space_mallocok(void); |
475 | |
476 | #endif /* _KERNEL */ |
477 | |
#if defined(_KERNEL) || defined(_KMEMUSER)
#include <machine/psl.h>	/* Must be after struct cpu_info declaration */
#endif /* _KERNEL || _KMEMUSER */
481 | |
482 | /* |
483 | * CTL_MACHDEP definitions. |
484 | */ |
485 | #define CPU_CONSDEV 1 /* dev_t: console terminal device */ |
486 | #define CPU_BIOSBASEMEM 2 /* int: bios-reported base mem (K) */ |
487 | #define CPU_BIOSEXTMEM 3 /* int: bios-reported ext. mem (K) */ |
488 | /* CPU_NKPDE 4 obsolete: int: number of kernel PDEs */ |
489 | #define CPU_BOOTED_KERNEL 5 /* string: booted kernel name */ |
490 | #define CPU_DISKINFO 6 /* struct disklist *: |
491 | * disk geometry information */ |
492 | #define CPU_FPU_PRESENT 7 /* int: FPU is present */ |
493 | #define CPU_OSFXSR 8 /* int: OS uses FXSAVE/FXRSTOR */ |
494 | #define CPU_SSE 9 /* int: OS/CPU supports SSE */ |
495 | #define CPU_SSE2 10 /* int: OS/CPU supports SSE2 */ |
496 | #define CPU_TMLR_MODE 11 /* int: longrun mode |
497 | * 0: minimum frequency |
498 | * 1: economy |
499 | * 2: performance |
500 | * 3: maximum frequency |
501 | */ |
502 | #define CPU_TMLR_FREQUENCY 12 /* int: current frequency */ |
503 | #define CPU_TMLR_VOLTAGE 13 /* int: curret voltage */ |
504 | #define CPU_TMLR_PERCENTAGE 14 /* int: current clock percentage */ |
505 | #define CPU_MAXID 15 /* number of valid machdep ids */ |
506 | |
507 | /* |
508 | * Structure for CPU_DISKINFO sysctl call. |
509 | * XXX this should be somewhere else. |
510 | */ |
511 | #define MAX_BIOSDISKS 16 |
512 | |
/*
 * NOTE(review): returned to userland by the CPU_DISKINFO sysctl (see the
 * comment above), so this layout is effectively ABI — do not reorder.
 */
struct disklist {
	int dl_nbiosdisks;			   /* number of bios disks */
	struct biosdisk_info {
		int bi_dev;			   /* BIOS device # (0x80 ..) */
		int bi_cyl;			   /* cylinders on disk */
		int bi_head;			   /* heads per track */
		int bi_sec;			   /* sectors per track */
		uint64_t bi_lbasecs;		   /* total sec. (iff ext13) */
#define BIFLAG_INVALID		0x01
#define BIFLAG_EXTINT13		0x02
		int bi_flags;			   /* BIFLAG_* above */
	} dl_biosdisks[MAX_BIOSDISKS];

	int dl_nnativedisks;			   /* number of native disks */
	struct nativedisk_info {
		char ni_devname[16];		   /* native device name */
		int ni_nmatches; 		   /* # of matches w/ BIOS */
		int ni_biosmatches[MAX_BIOSDISKS]; /* indices in dl_biosdisks */
	} dl_nativedisks[1];			   /* actually longer */
};
533 | #endif /* !_X86_CPU_H_ */ |
534 | |