root/arch/i386/include/cpufunc.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. invlpg
  2. lidt
  3. lldt
  4. ltr
  5. lcr0
  6. rcr0
  7. rcr2
  8. lcr3
  9. rcr3
  10. lcr4
  11. rcr4
  12. tlbflush
  13. tlbflushg
  14. disable_intr
  15. enable_intr
  16. read_eflags
  17. write_eflags
  18. wbinvd
  19. wrmsr
  20. rdmsr
  21. rdmsr_locked
  22. wrmsr_locked
  23. breakpoint

    1 /*      $OpenBSD: cpufunc.h,v 1.13 2007/02/17 17:38:37 tom Exp $        */
    2 /*      $NetBSD: cpufunc.h,v 1.8 1994/10/27 04:15:59 cgd Exp $  */
    3 
    4 /*
    5  * Copyright (c) 1993 Charles Hannum.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. All advertising materials mentioning features or use of this software
   17  *    must display the following acknowledgement:
   18  *      This product includes software developed by Charles Hannum.
   19  * 4. The name of the author may not be used to endorse or promote products
   20  *    derived from this software without specific prior written permission
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   32  */
   33 
   34 #ifndef _I386_CPUFUNC_H_
   35 #define _I386_CPUFUNC_H_
   36 
   37 #ifdef _KERNEL
   38 
   39 /*
   40  * Functions to provide access to i386-specific instructions.
   41  */
   42 
   43 #include <sys/cdefs.h>
   44 #include <sys/types.h>
   45 
   46 #include <machine/specialreg.h>
   47 
   48 static __inline void invlpg(u_int);
   49 static __inline void lidt(void *);
   50 static __inline void lldt(u_short);
   51 static __inline void ltr(u_short);
   52 static __inline void lcr0(u_int);
   53 static __inline u_int rcr0(void);
   54 static __inline u_int rcr2(void);
   55 static __inline void lcr3(u_int);
   56 static __inline u_int rcr3(void);
   57 static __inline void lcr4(u_int);
   58 static __inline u_int rcr4(void);
   59 static __inline void tlbflush(void);
   60 static __inline void tlbflushg(void);
   61 static __inline void disable_intr(void);
   62 static __inline void enable_intr(void);
   63 static __inline u_int read_eflags(void);
   64 static __inline void write_eflags(u_int);
   65 static __inline void wbinvd(void);
   66 static __inline void wrmsr(u_int, u_int64_t);
   67 static __inline u_int64_t rdmsr(u_int);
   68 static __inline void breakpoint(void);
   69 
/*
 * Invalidate the TLB entry for the page containing linear address
 * `addr` ("invlpg" instruction).  The "memory" clobber prevents the
 * compiler from moving memory accesses across the flush.
 */
static __inline void
invlpg(u_int addr)
{
        __asm __volatile("invlpg (%0)" : : "r" (addr) : "memory");
}
   75 
/*
 * Load the interrupt descriptor table register from the 6-byte
 * pseudo-descriptor at `p` ("lidt" instruction).
 *
 * NOTE(review): the instruction reads *p but there is no "m" operand
 * or "memory" clobber, so the compiler is not told about that read —
 * callers must ensure the descriptor is fully stored first; verify.
 */
static __inline void
lidt(void *p)
{
        __asm __volatile("lidt (%0)" : : "r" (p));
}
   81 
/*
 * Load the local descriptor table register with selector `sel`
 * ("lldt" instruction).
 */
static __inline void
lldt(u_short sel)
{
        __asm __volatile("lldt %0" : : "r" (sel));
}
   87 
/*
 * Load the task register with TSS selector `sel` ("ltr" instruction).
 */
static __inline void
ltr(u_short sel)
{
        __asm __volatile("ltr %0" : : "r" (sel));
}
   93 
/*
 * Write `val` into control register %cr0.
 */
static __inline void
lcr0(u_int val)
{
        __asm __volatile("movl %0,%%cr0" : : "r" (val));
}
   99 
/*
 * Return the current value of control register %cr0.
 */
static __inline u_int
rcr0(void)
{
        u_int val;
        __asm __volatile("movl %%cr0,%0" : "=r" (val));
        return val;
}
  107 
/*
 * Return the current value of control register %cr2 (the faulting
 * linear address after a page fault).
 */
static __inline u_int
rcr2(void)
{
        u_int val;
        __asm __volatile("movl %%cr2,%0" : "=r" (val));
        return val;
}
  115 
/*
 * Write `val` (page directory base) into control register %cr3.
 * On x86 this also flushes all non-global TLB entries.
 */
static __inline void
lcr3(u_int val)
{
        __asm __volatile("movl %0,%%cr3" : : "r" (val));
}
  121 
/*
 * Return the current value of control register %cr3 (page directory
 * base).
 */
static __inline u_int
rcr3(void)
{
        u_int val;
        __asm __volatile("movl %%cr3,%0" : "=r" (val));
        return val;
}
  129 
/*
 * Write `val` into control register %cr4.
 */
static __inline void
lcr4(u_int val)
{
        __asm __volatile("movl %0,%%cr4" : : "r" (val));
}
  135 
/*
 * Return the current value of control register %cr4.
 */
static __inline u_int
rcr4(void)
{
        u_int val;
        __asm __volatile("movl %%cr4,%0" : "=r" (val));
        return val;
}
  143 
/*
 * Flush all non-global TLB entries by reloading %cr3 with its
 * current value.  Entries with the PTE G bit set survive; use
 * tlbflushg() to flush those as well.
 */
static __inline void
tlbflush(void)
{
        u_int val;
        __asm __volatile("movl %%cr3,%0" : "=r" (val));
        __asm __volatile("movl %0,%%cr3" : : "r" (val));
}
  151 
/*
 * Flush the entire TLB, including global (G-bit) entries.
 */
static __inline void
tlbflushg(void)
{
        /*
         * Big hammer: flush all TLB entries, including ones from PTE's
         * with the G bit set.  This should only be necessary if TLB
         * shootdown falls far behind.
         *
         * Intel Architecture Software Developer's Manual, Volume 3,
         *      System Programming, section 9.10, "Invalidating the
         * Translation Lookaside Buffers (TLBs)":
         * "The following operations invalidate all TLB entries, irrespective
         * of the setting of the G flag:
         * ...
         * "(P6 family processors only): Writing to control register CR4 to
         * modify the PSE, PGE, or PAE flag."
         *
         * (the alternatives not quoted above are not an option here.)
         *
         * If PGE is not in use, we reload CR3 for the benefit of
         * pre-P6-family processors.
         */

#if defined(I686_CPU)
        if (cpu_feature & CPUID_PGE) {
                /* Toggling CR4.PGE flushes everything, globals included. */
                u_int cr4 = rcr4();
                lcr4(cr4 & ~CR4_PGE);
                lcr4(cr4);
        } else
#endif
                tlbflush();
}
  184 
  185 #ifdef notyet
  186 void    setidt(int idx, /*XXX*/caddr_t func, int typ, int dpl);
  187 #endif
  188 
  189 
  190 /* XXXX ought to be in psl.h with spl() functions */
  191 
/*
 * Disable maskable interrupts on the local CPU ("cli": clear the
 * EFLAGS IF bit).
 */
static __inline void
disable_intr(void)
{
        __asm __volatile("cli");
}
  197 
/*
 * Enable maskable interrupts on the local CPU ("sti": set the
 * EFLAGS IF bit).
 */
static __inline void
enable_intr(void)
{
        __asm __volatile("sti");
}
  203 
/*
 * Return the current EFLAGS register (pushed onto the stack with
 * "pushfl", then popped into a general register).
 */
static __inline u_int
read_eflags(void)
{
        u_int ef;

        __asm __volatile("pushfl; popl %0" : "=r" (ef));
        return (ef);
}
  212 
/*
 * Load `ef` into the EFLAGS register (pushed onto the stack, then
 * popped with "popfl").  Typically used to restore the value saved
 * by read_eflags().
 */
static __inline void
write_eflags(u_int ef)
{
        __asm __volatile("pushl %0; popfl" : : "r" (ef));
}
  218 
/*
 * Write back all modified cache lines to memory and invalidate the
 * internal caches ("wbinvd" instruction).
 */
static __inline void
wbinvd(void)
{
        __asm __volatile("wbinvd");
}
  224 
  225 
/*
 * Write the 64-bit value `newval` to model-specific register `msr`.
 * The "A" constraint places the value in %edx:%eax and "c" puts the
 * MSR number in %ecx, as "wrmsr" requires.
 */
static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
        __asm __volatile("wrmsr" : : "A" (newval), "c" (msr));
}
  231 
/*
 * Read and return the 64-bit value of model-specific register `msr`.
 * "rdmsr" takes the MSR number in %ecx and returns the value in
 * %edx:%eax (the "A" constraint).
 */
static __inline u_int64_t
rdmsr(u_int msr)
{
        u_int64_t rv;

        __asm __volatile("rdmsr" : "=A" (rv) : "c" (msr));
        return (rv);
}
  240 
  241 /* 
  242  * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
  243  *
  244  * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
  245  */
  246 
  247 #define OPTERON_MSR_PASSCODE    0x9c5a203a
  248  
  249 static __inline u_int64_t
  250 rdmsr_locked(u_int msr, u_int code)
  251 {
  252         uint64_t rv;
  253         __asm volatile("rdmsr"
  254             : "=A" (rv)
  255             : "c" (msr), "D" (code));
  256         return (rv);
  257 }
  258 
  259 static __inline void
  260 wrmsr_locked(u_int msr, u_int code, u_int64_t newval)
  261 {
  262         __asm volatile("wrmsr"
  263             :
  264             : "A" (newval), "c" (msr), "D" (code));
  265 }
  266 
/*
 * Break into DDB/KGDB by raising a breakpoint trap ("int $3").
 */
static __inline void
breakpoint(void)
{
        __asm __volatile("int $3");
}
  273 
  274 #ifdef I686_CPU
  275 void amd64_errata(struct cpu_info *);
  276 #endif
  277 
  278 #endif /* _KERNEL */
  279 #endif /* !_I386_CPUFUNC_H_ */

/* [<][>][^][v][top][bottom][index][help] */