
  1. #ifndef _ASM_X86_PROCESSOR_H
  2. #define _ASM_X86_PROCESSOR_H
  3.  
  4. #include <asm/processor-flags.h>
  5.  
  6. /* Forward declaration, a strange C thing */
  7. struct task_struct;
  8. struct mm_struct;
  9. struct vm86;
  10.  
  11. #include <asm/math_emu.h>
  12. #include <asm/segment.h>
  13. #include <asm/types.h>
  14. #include <uapi/asm/sigcontext.h>
  15. #include <asm/current.h>
  16. #include <asm/cpufeature.h>
  17. #include <asm/page.h>
  18. #include <asm/pgtable_types.h>
  19. #include <asm/percpu.h>
  20. #include <asm/msr.h>
  21. #include <asm/desc_defs.h>
  22. #include <asm/nops.h>
  23. #include <asm/special_insns.h>
  24. #include <asm/fpu/types.h>
  25.  
  26. #include <linux/personality.h>
  27. #include <linux/cpumask.h>
  28. #include <linux/cache.h>
  29. #include <linux/threads.h>
  30. #include <linux/math64.h>
  31. #include <linux/err.h>
  32. #include <linux/irqflags.h>
  33.  
  34. /*
  35.  * We handle most unaligned accesses in hardware.  On the other hand
  36.  * unaligned DMA can be quite expensive on some Nehalem processors.
  37.  *
  38.  * Based on this we disable the IP header alignment in network drivers.
  39.  */
  40. #define NET_IP_ALIGN    0
  41.  
  42. #define HBP_NUM 4
  43. /*
  44.  * Default implementation of macro that returns current
  45.  * instruction pointer ("program counter").
  46.  */
  47. static inline void *current_text_addr(void)
  48. {
  49.         void *pc;
  50.  
  51.         asm volatile("mov $1f, %0; 1:":"=r" (pc));
  52.  
  53.         return pc;
  54. }
  55.  
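For illustration only (hypothetical helper, not part of this header; printk() comes from <linux/printk.h>): the asm stores the address of the local label 1: that immediately follows the mov, so the returned pointer is simply "the code around here".

/* Sketch: ad-hoc debugging helper built on current_text_addr(). */
static inline void report_text_addr(void)
{
        void *pc = current_text_addr();

        printk(KERN_DEBUG "executing near %p\n", pc);
}
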
  56. /*
  57.  * These alignment constraints are for performance in the vSMP case,
  58.  * but in the task_struct case we must also meet hardware imposed
  59.  * alignment requirements of the FPU state:
  60.  */
  61. #ifdef CONFIG_X86_VSMP
  62. # define ARCH_MIN_TASKALIGN             (1 << INTERNODE_CACHE_SHIFT)
  63. # define ARCH_MIN_MMSTRUCT_ALIGN        (1 << INTERNODE_CACHE_SHIFT)
  64. #else
  65. # define ARCH_MIN_TASKALIGN             16
  66. # define ARCH_MIN_MMSTRUCT_ALIGN        0
  67. #endif
  68.  
  69. enum tlb_infos {
  70.         ENTRIES,
  71.         NR_INFO
  72. };
  73.  
  74. extern u16 __read_mostly tlb_lli_4k[NR_INFO];
  75. extern u16 __read_mostly tlb_lli_2m[NR_INFO];
  76. extern u16 __read_mostly tlb_lli_4m[NR_INFO];
  77. extern u16 __read_mostly tlb_lld_4k[NR_INFO];
  78. extern u16 __read_mostly tlb_lld_2m[NR_INFO];
  79. extern u16 __read_mostly tlb_lld_4m[NR_INFO];
  80. extern u16 __read_mostly tlb_lld_1g[NR_INFO];
  81.  
  82. /*
  83.  *  CPU type and hardware bug flags. Kept separately for each CPU.
  84.  *  Members of this structure are referenced in head.S, so think twice
  85.  *  before touching them. [mj]
  86.  */
  87.  
  88. struct cpuinfo_x86 {
  89.         __u8                    x86;            /* CPU family */
  90.         __u8                    x86_vendor;     /* CPU vendor */
  91.         __u8                    x86_model;
  92.         __u8                    x86_mask;
  93. #ifdef CONFIG_X86_32
  94.         char                    wp_works_ok;    /* It doesn't on 386's */
  95.  
  96.         /* Problems on some 486Dx4's and old 386's: */
  97.         char                    rfu;
  98.         char                    pad0;
  99.         char                    pad1;
  100. #else
  101.         /* Number of 4K pages in DTLB/ITLB combined: */
  102.         int                     x86_tlbsize;
  103. #endif
  104.         __u8                    x86_virt_bits;
  105.         __u8                    x86_phys_bits;
  106.         /* CPUID returned core id bits: */
  107.         __u8                    x86_coreid_bits;
  108.         /* Max extended CPUID function supported: */
  109.         __u32                   extended_cpuid_level;
  110.         /* Maximum supported CPUID level, -1=no CPUID: */
  111.         int                     cpuid_level;
  112.         __u32                   x86_capability[NCAPINTS + NBUGINTS];
  113.         char                    x86_vendor_id[16];
  114.         char                    x86_model_id[64];
  115.         /* in KB - valid for CPUs which support this call: */
  116.         int                     x86_cache_size;
  117.         int                     x86_cache_alignment;    /* In bytes */
  118.         /* Cache QoS architectural values: */
  119.         int                     x86_cache_max_rmid;     /* max index */
  120.         int                     x86_cache_occ_scale;    /* scale to bytes */
  121.         int                     x86_power;
  122.         unsigned long           loops_per_jiffy;
  123.         /* cpuid returned max cores value: */
  124.         u16                      x86_max_cores;
  125.         u16                     apicid;
  126.         u16                     initial_apicid;
  127.         u16                     x86_clflush_size;
  128.         /* number of cores as seen by the OS: */
  129.         u16                     booted_cores;
  130.         /* Physical processor id: */
  131.         u16                     phys_proc_id;
  132.         /* Core id: */
  133.         u16                     cpu_core_id;
  134.         /* Compute unit id */
  135.         u8                      compute_unit_id;
  136.         /* Index into per_cpu list: */
  137.         u16                     cpu_index;
  138.         u32                     microcode;
  139. };
  140.  
  141. #define X86_VENDOR_INTEL        0
  142. #define X86_VENDOR_CYRIX        1
  143. #define X86_VENDOR_AMD          2
  144. #define X86_VENDOR_UMC          3
  145. #define X86_VENDOR_CENTAUR      5
  146. #define X86_VENDOR_TRANSMETA    7
  147. #define X86_VENDOR_NSC          8
  148. #define X86_VENDOR_NUM          9
  149.  
  150. #define X86_VENDOR_UNKNOWN      0xff
  151.  
  152. /*
  153.  * capabilities of CPUs
  154.  */
  155. extern struct cpuinfo_x86       boot_cpu_data;
  156. extern struct cpuinfo_x86       new_cpu_data;
  157.  
  158. extern struct tss_struct        doublefault_tss;
  159. extern __u32                    cpu_caps_cleared[NCAPINTS];
  160. extern __u32                    cpu_caps_set[NCAPINTS];
  161.  
  162. #ifdef CONFIG_SMP
  163. DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
  164. #define cpu_data(cpu)           per_cpu(cpu_info, cpu)
  165. #else
  166. #define cpu_info                boot_cpu_data
  167. #define cpu_data(cpu)           boot_cpu_data
  168. #endif
  169.  
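As a usage sketch (hypothetical function, assuming pr_info() from <linux/printk.h> is available): iterating the online CPUs and reading their cpuinfo_x86 through cpu_data() is the usual access pattern for both the SMP and UP definitions above.

/* Sketch: dump family/model/stepping for every online CPU. */
static inline void dump_cpu_signatures(void)
{
        int cpu;

        for_each_online_cpu(cpu) {
                struct cpuinfo_x86 *c = &cpu_data(cpu);

                pr_info("CPU%d: family %u model %u stepping %u\n",
                        cpu, c->x86, c->x86_model, c->x86_mask);
        }
}
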
  170. extern const struct seq_operations cpuinfo_op;
  171.  
  172. extern void cpu_detect(struct cpuinfo_x86 *c);
  173.  
  174. extern void early_cpu_init(void);
  175. extern void identify_boot_cpu(void);
  176. extern void identify_secondary_cpu(struct cpuinfo_x86 *);
  177. extern void print_cpu_info(struct cpuinfo_x86 *);
  178. void print_cpu_msr(struct cpuinfo_x86 *);
  179. extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
  180. extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
  181. extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
  182.  
  183. extern void detect_extended_topology(struct cpuinfo_x86 *c);
  184. extern void detect_ht(struct cpuinfo_x86 *c);
  185.  
  186. #ifdef CONFIG_X86_32
  187. extern int have_cpuid_p(void);
  188. #else
  189. static inline int have_cpuid_p(void)
  190. {
  191.         return 1;
  192. }
  193. #endif
  194. static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
  195.                                 unsigned int *ecx, unsigned int *edx)
  196. {
  197.         /* ecx is often an input as well as an output. */
  198.         asm volatile("cpuid"
  199.             : "=a" (*eax),
  200.               "=b" (*ebx),
  201.               "=c" (*ecx),
  202.               "=d" (*edx)
  203.             : "0" (*eax), "2" (*ecx)
  204.             : "memory");
  205. }
  206.  
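For illustration (hypothetical helper, assuming memcpy() from <linux/string.h>): leaf 0 returns the maximum basic leaf in EAX and the 12-byte vendor string in EBX, EDX, ECX, in that order, which is roughly how x86_vendor_id above gets filled in. Note that EAX and ECX are inputs as well as outputs, so both must be initialized.

/* Sketch: read the CPUID vendor string ("GenuineIntel", "AuthenticAMD", ...). */
static inline void read_vendor_string(char vendor[13])
{
        unsigned int eax = 0, ebx, ecx = 0, edx;

        native_cpuid(&eax, &ebx, &ecx, &edx);
        memcpy(vendor + 0, &ebx, 4);
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        vendor[12] = '\0';
}
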
  207. static inline void load_cr3(pgd_t *pgdir)
  208. {
  209.         write_cr3(__pa(pgdir));
  210. }
  211.  
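A minimal sketch of how load_cr3() tends to be used (hypothetical helper; swapper_pg_dir and __flush_tlb_all() come from other headers such as <asm/pgtable.h> and <asm/tlbflush.h>): point CR3 at the kernel's reference page tables, then flush the TLB, since writing CR3 only invalidates non-global entries.

/* Sketch: switch this CPU to the init_mm (kernel) page tables. */
static inline void switch_to_kernel_page_tables(void)
{
        load_cr3(swapper_pg_dir);       /* CR3 := __pa(swapper_pg_dir) */
        __flush_tlb_all();              /* also drop global TLB entries */
}
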
  212. #ifdef CONFIG_X86_32
  213. /* This is the TSS defined by the hardware. */
  214. struct x86_hw_tss {
  215.         unsigned short          back_link, __blh;
  216.         unsigned long           sp0;
  217.         unsigned short          ss0, __ss0h;
  218.         unsigned long           sp1;
  219.  
  220.         /*
  221.          * We don't use ring 1, so ss1 is a convenient scratch space in
  222.          * the same cacheline as sp0.  We use ss1 to cache the value in
  223.          * MSR_IA32_SYSENTER_CS.  When we context switch
  224.          * MSR_IA32_SYSENTER_CS, we first check if the new value being
  225.          * written matches ss1, and, if it's not, then we wrmsr the new
  226.          * value and update ss1.
  227.          *
  228.          * The only reason we context switch MSR_IA32_SYSENTER_CS is
  229.          * that we set it to zero in vm86 tasks to avoid corrupting the
  230.          * stack if we were to go through the sysenter path from vm86
  231.          * mode.
  232.          */
  233.         unsigned short          ss1;    /* MSR_IA32_SYSENTER_CS */
  234.  
  235.         unsigned short          __ss1h;
  236.         unsigned long           sp2;
  237.         unsigned short          ss2, __ss2h;
  238.         unsigned long           __cr3;
  239.         unsigned long           ip;
  240.         unsigned long           flags;
  241.         unsigned long           ax;
  242.         unsigned long           cx;
  243.         unsigned long           dx;
  244.         unsigned long           bx;
  245.         unsigned long           sp;
  246.         unsigned long           bp;
  247.         unsigned long           si;
  248.         unsigned long           di;
  249.         unsigned short          es, __esh;
  250.         unsigned short          cs, __csh;
  251.         unsigned short          ss, __ssh;
  252.         unsigned short          ds, __dsh;
  253.         unsigned short          fs, __fsh;
  254.         unsigned short          gs, __gsh;
  255.         unsigned short          ldt, __ldth;
  256.         unsigned short          trace;
  257.         unsigned short          io_bitmap_base;
  258.  
  259. } __attribute__((packed));
  260. #else
  261. struct x86_hw_tss {
  262.         u32                     reserved1;
  263.         u64                     sp0;
  264.         u64                     sp1;
  265.         u64                     sp2;
  266.         u64                     reserved2;
  267.         u64                     ist[7];
  268.         u32                     reserved3;
  269.         u32                     reserved4;
  270.         u16                     reserved5;
  271.         u16                     io_bitmap_base;
  272.  
  273. } __attribute__((packed)) ____cacheline_aligned;
  274. #endif
  275.  
  276. /*
  277.  * IO-bitmap sizes:
  278.  */
  279. #define IO_BITMAP_BITS                  65536
  280. #define IO_BITMAP_BYTES                 (IO_BITMAP_BITS/8)
  281. #define IO_BITMAP_LONGS                 (IO_BITMAP_BYTES/sizeof(long))
  282. #define IO_BITMAP_OFFSET                offsetof(struct tss_struct, io_bitmap)
  283. #define INVALID_IO_BITMAP_OFFSET        0x8000
  284.  
  285. struct tss_struct {
  286.         /*
  287.          * The hardware state:
  288.          */
  289.         struct x86_hw_tss       x86_tss;
  290.  
  291.         /*
  292.          * The extra 1 is there because the CPU will access an
  293.          * additional byte beyond the end of the IO permission
  294.          * bitmap. The extra byte must be all 1 bits, and must
  295.          * be within the limit.
  296.          */
  297.         unsigned long           io_bitmap[IO_BITMAP_LONGS + 1];
  298.  
  299.         /*
  300.          * Space for the temporary SYSENTER stack:
  301.          */
  302.         unsigned long           SYSENTER_stack[64];
  303.  
  304. } ____cacheline_aligned;
  305.  
  306. DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
  307.  
  308. #ifdef CONFIG_X86_32
  309. DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
  310. #endif
  311.  
  312. /*
  313.  * Save the original ist values for checking stack pointers during debugging
  314.  */
  315. struct orig_ist {
  316.         unsigned long           ist[7];
  317. };
  318.  
  319. #ifdef CONFIG_X86_64
  320. DECLARE_PER_CPU(struct orig_ist, orig_ist);
  321.  
  322. union irq_stack_union {
  323.         char irq_stack[IRQ_STACK_SIZE];
  324.         /*
  325.          * GCC hardcodes the stack canary as %gs:40.  Since the
  326.          * irq_stack is the object at %gs:0, we reserve the bottom
  327.          * 48 bytes of the irq stack for the canary.
  328.          */
  329.         struct {
  330.                 char gs_base[40];
  331.                 unsigned long stack_canary;
  332.         };
  333. };
  334.  
  335. DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
  336. DECLARE_INIT_PER_CPU(irq_stack_union);
  337.  
  338. DECLARE_PER_CPU(char *, irq_stack_ptr);
  339. DECLARE_PER_CPU(unsigned int, irq_count);
  340. extern asmlinkage void ignore_sysret(void);
  341. #else   /* X86_64 */
  342. #ifdef CONFIG_CC_STACKPROTECTOR
  343. /*
  344.  * Make sure the stack canary segment base is cache-line aligned:
  345.  *   "For Intel Atom processors, avoid non zero segment base address
  346.  *    that is not aligned to cache line boundary at all cost."
  347.  * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
  348.  */
  349. struct stack_canary {
  350.         char __pad[20];         /* canary at %gs:20 */
  351.         unsigned long canary;
  352. };
  353. DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
  354. #endif
  355. /*
  356.  * per-CPU IRQ handling stacks
  357.  */
  358. struct irq_stack {
  359.         u32                     stack[THREAD_SIZE/sizeof(u32)];
  360. } __aligned(THREAD_SIZE);
  361.  
  362. DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
  363. DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
  364. #endif  /* X86_64 */
  365.  
  366. extern unsigned int xstate_size;
  367.  
  368. struct perf_event;
  369.  
  370. struct thread_struct {
  371.         /* Cached TLS descriptors: */
  372.         struct desc_struct      tls_array[GDT_ENTRY_TLS_ENTRIES];
  373.         unsigned long           sp0;
  374.         unsigned long           sp;
  375. #ifdef CONFIG_X86_32
  376.         unsigned long           sysenter_cs;
  377. #else
  378.         unsigned short          es;
  379.         unsigned short          ds;
  380.         unsigned short          fsindex;
  381.         unsigned short          gsindex;
  382. #endif
  383. #ifdef CONFIG_X86_32
  384.         unsigned long           ip;
  385. #endif
  386. #ifdef CONFIG_X86_64
  387.         unsigned long           fs;
  388. #endif
  389.         unsigned long           gs;
  390.  
  391.         /* Save middle states of ptrace breakpoints */
  392.         struct perf_event       *ptrace_bps[HBP_NUM];
  393.         /* Debug status used for traps, single steps, etc... */
  394.         unsigned long           debugreg6;
  395.         /* Keep track of the exact dr7 value set by the user */
  396.         unsigned long           ptrace_dr7;
  397.         /* Fault info: */
  398.         unsigned long           cr2;
  399.         unsigned long           trap_nr;
  400.         unsigned long           error_code;
  401. #ifdef CONFIG_VM86
  402.         /* Virtual 86 mode info */
  403.         struct vm86             *vm86;
  404. #endif
  405.         /* IO permissions: */
  406.         unsigned long           *io_bitmap_ptr;
  407.         unsigned long           iopl;
  408.         /* Max allowed port in the bitmap, in bytes: */
  409.         unsigned                io_bitmap_max;
  410.  
  411.         /* Floating point and extended processor state */
  412.         struct fpu              fpu;
  413.         /*
  414.          * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
  415.          * the end.
  416.          */
  417. };
  418.  
  419. /*
  420.  * Set IOPL bits in EFLAGS from given mask
  421.  */
  422. static inline void native_set_iopl_mask(unsigned mask)
  423. {
  424. #ifdef CONFIG_X86_32
  425.         unsigned int reg;
  426.  
  427.         asm volatile ("pushfl;"
  428.                       "popl %0;"
  429.                       "andl %1, %0;"
  430.                       "orl %2, %0;"
  431.                       "pushl %0;"
  432.                       "popfl"
  433.                       : "=&r" (reg)
  434.                       : "i" (~X86_EFLAGS_IOPL), "r" (mask));
  435. #endif
  436. }
  437.  
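As a sketch of how the mask is built (hypothetical helper): the I/O privilege level occupies EFLAGS bits 12-13, so the iopl() syscall path shifts the requested level (0-3) by X86_EFLAGS_IOPL_BIT from <asm/processor-flags.h> before handing it down.

/* Sketch: grant the current task I/O privilege level 'level' (0-3). */
static inline void example_set_iopl(unsigned int level)
{
        native_set_iopl_mask((level & 3) << X86_EFLAGS_IOPL_BIT);
}
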
  438. static inline void
  439. native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
  440. {
  441.         tss->x86_tss.sp0 = thread->sp0;
  442. #ifdef CONFIG_X86_32
  443.         /* Only happens when SEP is enabled, no need to test "SEP"arately: */
  444.         if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
  445.                 tss->x86_tss.ss1 = thread->sysenter_cs;
  446.                 wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
  447.         }
  448. #endif
  449. }
  450.  
  451. static inline void native_swapgs(void)
  452. {
  453. #ifdef CONFIG_X86_64
  454.         asm volatile("swapgs" ::: "memory");
  455. #endif
  456. }
  457.  
  458.  
  459. #ifdef CONFIG_PARAVIRT
  460. #include <asm/paravirt.h>
  461. #else
  462. #define __cpuid                 native_cpuid
  463. #define paravirt_enabled()      0
  464. #define paravirt_has(x)         0
  465.  
  466. static inline void load_sp0(struct tss_struct *tss,
  467.                             struct thread_struct *thread)
  468. {
  469.         native_load_sp0(tss, thread);
  470. }
  471.  
  472. #define set_iopl_mask native_set_iopl_mask
  473. #endif /* CONFIG_PARAVIRT */
  474.  
  475. typedef struct {
  476.         unsigned long           seg;
  477. } mm_segment_t;
  478.  
  479.  
  480. /* Free all resources held by a thread. */
  481. extern void release_thread(struct task_struct *);
  482.  
  483. unsigned long get_wchan(struct task_struct *p);
  484.  
  485. /*
  486.  * Generic CPUID function
  487.  * clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
  488.  * resulting in stale register contents being returned.
  489.  */
  490. static inline void cpuid(unsigned int op,
  491.                          unsigned int *eax, unsigned int *ebx,
  492.                          unsigned int *ecx, unsigned int *edx)
  493. {
  494.         *eax = op;
  495.         *ecx = 0;
  496.         __cpuid(eax, ebx, ecx, edx);
  497. }
  498.  
  499. /* Some CPUID calls want 'count' to be placed in ecx */
  500. static inline void cpuid_count(unsigned int op, int count,
  501.                                unsigned int *eax, unsigned int *ebx,
  502.                                unsigned int *ecx, unsigned int *edx)
  503. {
  504.         *eax = op;
  505.         *ecx = count;
  506.         __cpuid(eax, ebx, ecx, edx);
  507. }
  508.  
  509. /*
  510.  * CPUID functions returning a single datum
  511.  */
  512. static inline unsigned int cpuid_eax(unsigned int op)
  513. {
  514.         unsigned int eax, ebx, ecx, edx;
  515.  
  516.         cpuid(op, &eax, &ebx, &ecx, &edx);
  517.  
  518.         return eax;
  519. }
  520.  
  521. static inline unsigned int cpuid_ebx(unsigned int op)
  522. {
  523.         unsigned int eax, ebx, ecx, edx;
  524.  
  525.         cpuid(op, &eax, &ebx, &ecx, &edx);
  526.  
  527.         return ebx;
  528. }
  529.  
  530. static inline unsigned int cpuid_ecx(unsigned int op)
  531. {
  532.         unsigned int eax, ebx, ecx, edx;
  533.  
  534.         cpuid(op, &eax, &ebx, &ecx, &edx);
  535.  
  536.         return ecx;
  537. }
  538.  
  539. static inline unsigned int cpuid_edx(unsigned int op)
  540. {
  541.         unsigned int eax, ebx, ecx, edx;
  542.  
  543.         cpuid(op, &eax, &ebx, &ecx, &edx);
  544.  
  545.         return edx;
  546. }
  547.  
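Two typical uses of these wrappers, as a sketch (hypothetical helpers): the single-datum forms suit leaves where only one register matters, e.g. the highest extended leaf stored in extended_cpuid_level above, while cpuid_count() is needed for sub-leaves such as the structured extended feature flags in leaf 7.

/* Sketch: highest supported extended CPUID leaf (what fills extended_cpuid_level). */
static inline unsigned int max_extended_leaf(void)
{
        return cpuid_eax(0x80000000);
}

/* Sketch: EBX of leaf 7, sub-leaf 0 (structured extended feature flags). */
static inline unsigned int leaf7_ebx(void)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);
        return ebx;
}
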
  548. /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
  549. static __always_inline void rep_nop(void)
  550. {
  551.         asm volatile("rep; nop" ::: "memory");
  552. }
  553.  
  554. static __always_inline void cpu_relax(void)
  555. {
  556.         rep_nop();
  557. }
  558.  
  559. #define cpu_relax_lowlatency() cpu_relax()
  560.  
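The canonical pattern these helpers exist for, as a sketch (hypothetical function): spin on a flag while telling the CPU, via PAUSE, that this is a busy-wait loop.

/* Sketch: polite busy-wait until another CPU sets *flag. */
static inline void wait_for_flag(volatile int *flag)
{
        while (!*flag)
                cpu_relax();    /* REP NOP: yield resources to the sibling thread */
}
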
  561. /* Stop speculative execution and prefetching of modified code. */
  562. static inline void sync_core(void)
  563. {
  564.         int tmp;
  565.  
  566. #ifdef CONFIG_M486
  567.         /*
  568.          * Do a CPUID if available, otherwise do a jump.  The jump
  569.          * can conveniently enough be the jump around CPUID.
  570.          */
  571.         asm volatile("cmpl %2,%1\n\t"
  572.                      "jl 1f\n\t"
  573.                      "cpuid\n"
  574.                      "1:"
  575.                      : "=a" (tmp)
  576.                      : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
  577.                      : "ebx", "ecx", "edx", "memory");
  578. #else
  579.         /*
  580.          * CPUID is a barrier to speculative execution.
  581.          * Prefetched instructions are automatically
  582.          * invalidated when modified.
  583.          */
  584.         asm volatile("cpuid"
  585.                      : "=a" (tmp)
  586.                      : "0" (1)
  587.                      : "ebx", "ecx", "edx", "memory");
  588. #endif
  589. }
  590.  
  591. extern void select_idle_routine(const struct cpuinfo_x86 *c);
  592. extern void init_amd_e400_c1e_mask(void);
  593.  
  594. extern unsigned long            boot_option_idle_override;
  595. extern bool                     amd_e400_c1e_detected;
  596.  
  597. enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
  598.                          IDLE_POLL};
  599.  
  600. extern void enable_sep_cpu(void);
  601. extern int sysenter_setup(void);
  602.  
  603. extern void early_trap_init(void);
  604. void early_trap_pf_init(void);
  605.  
  606. /* Defined in head.S */
  607. extern struct desc_ptr          early_gdt_descr;
  608.  
  609. extern void cpu_set_gdt(int);
  610. extern void switch_to_new_gdt(int);
  611. extern void load_percpu_segment(int);
  612. extern void cpu_init(void);
  613.  
  614. static inline unsigned long get_debugctlmsr(void)
  615. {
  616.         unsigned long debugctlmsr = 0;
  617.  
  618. #ifndef CONFIG_X86_DEBUGCTLMSR
  619.         if (boot_cpu_data.x86 < 6)
  620.                 return 0;
  621. #endif
  622.         rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
  623.  
  624.         return debugctlmsr;
  625. }
  626.  
  627. static inline void update_debugctlmsr(unsigned long debugctlmsr)
  628. {
  629. #ifndef CONFIG_X86_DEBUGCTLMSR
  630.         if (boot_cpu_data.x86 < 6)
  631.                 return;
  632. #endif
  633.         wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
  634. }
  635.  
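A sketch of how these two are paired in practice (hypothetical helper; DEBUGCTLMSR_BTF comes from <asm/msr-index.h>): set_task_blockstep() below performs essentially this read-modify-write to turn branch single-stepping on or off.

/* Sketch: toggle the BTF bit so the CPU traps on branches, not on every insn. */
static inline void set_branch_trap_flag(bool on)
{
        unsigned long debugctl = get_debugctlmsr();

        if (on)
                debugctl |= DEBUGCTLMSR_BTF;
        else
                debugctl &= ~DEBUGCTLMSR_BTF;
        update_debugctlmsr(debugctl);
}
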
  636. extern void set_task_blockstep(struct task_struct *task, bool on);
  637.  
  638. /* Boot loader type from the setup header: */
  639. extern int                      bootloader_type;
  640. extern int                      bootloader_version;
  641.  
  642. extern char                     ignore_fpu_irq;
  643.  
  644. #define HAVE_ARCH_PICK_MMAP_LAYOUT 1
  645. #define ARCH_HAS_PREFETCHW
  646. #define ARCH_HAS_SPINLOCK_PREFETCH
  647.  
  648. #ifdef CONFIG_X86_32
  649. # define BASE_PREFETCH          ""
  650. # define ARCH_HAS_PREFETCH
  651. #else
  652. # define BASE_PREFETCH          "prefetcht0 %P1"
  653. #endif
  654.  
  655. /*
  656.  * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
  657.  *
  658.  * It's not worth caring about 3dnow prefetches for the K6
  659.  * because they are microcoded there and very slow.
  660.  */
  661. static inline void prefetch(const void *x)
  662. {
  663.         alternative_input(BASE_PREFETCH,
  664.                           "prefetchnta (%1)",
  665.                           X86_FEATURE_XMM,
  666.                           "r" (x));
  667. }
  668.  
  669. /*
  670.  * 3dnow prefetch to get an exclusive cache line.
  671.  * Useful for spinlocks to avoid one state transition in the
  672.  * cache coherency protocol:
  673.  */
  674. static inline void prefetchw(const void *x)
  675. {
  676.         alternative_input(BASE_PREFETCH,
  677.                           "prefetchw (%1)",
  678.                           X86_FEATURE_3DNOW,
  679.                           "r" (x));
  680. }
  681.  
  682. static inline void spin_lock_prefetch(const void *x)
  683. {
  684.         prefetchw(x);
  685. }
  686.  
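A sketch of the intended usage pattern (hypothetical function, assuming struct list_head from <linux/list.h>): issue the prefetch for the next node while the current one is being processed, so the memory latency overlaps with useful work.

/* Sketch: overlap cache misses with work while walking a list. */
static inline void walk_with_prefetch(struct list_head *head)
{
        struct list_head *pos;

        for (pos = head->next; pos != head; pos = pos->next) {
                prefetch(pos->next);    /* start fetching the next node early */
                /* ... process the entry containing 'pos' ... */
        }
}
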
  687. #define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
  688.                            TOP_OF_KERNEL_STACK_PADDING)
  689.  
  690. #ifdef CONFIG_X86_32
  691. /*
  692.  * User space process size: 3GB (default).
  693.  */
  694. #define TASK_SIZE               PAGE_OFFSET
  695. #define TASK_SIZE_MAX           TASK_SIZE
  696. #define STACK_TOP               TASK_SIZE
  697. #define STACK_TOP_MAX           STACK_TOP
  698.  
  699. #define INIT_THREAD  {                                                    \
  700.         .sp0                    = TOP_OF_INIT_STACK,                      \
  701.         .sysenter_cs            = __KERNEL_CS,                            \
  702.         .io_bitmap_ptr          = NULL,                                   \
  703. }
  704.  
  705. extern unsigned long thread_saved_pc(struct task_struct *tsk);
  706.  
  707. /*
  708.  * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
  709.  * This is necessary to guarantee that the entire "struct pt_regs"
  710.  * is accessible even if the CPU hasn't stored the SS/ESP registers
  711.  * on the stack (an interrupt gate does not save these registers
  712.  * when switching to the same privilege ring).
  713.  * Therefore beware: accessing the ss/esp fields of the
  714.  * "struct pt_regs" is possible, but they may contain
  715.  * completely wrong values.
  716.  */
  717. #define task_pt_regs(task) \
  718. ({                                                                      \
  719.         unsigned long __ptr = (unsigned long)task_stack_page(task);     \
  720.         __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;             \
  721.         ((struct pt_regs *)__ptr) - 1;                                  \
  722. })
  723.  
  724. #define KSTK_ESP(task)          (task_pt_regs(task)->sp)
  725.  
  726. #else
  727. /*
  728.  * User space process size: 47 bits minus one guard page.  The guard
  729.  * page is necessary on Intel CPUs: if a SYSCALL instruction is at
  730.  * the highest possible canonical userspace address, then that
  731.  * syscall will enter the kernel with a non-canonical return
  732.  * address, and SYSRET will explode dangerously.  We avoid this
  733.  * particular problem by preventing anything from being mapped
  734.  * at the maximum canonical address.
  735.  */
  736. #define TASK_SIZE_MAX   ((1UL << 47) - PAGE_SIZE)
  737.  
  738. /* This decides where the kernel will search for a free chunk of vm
  739.  * space during mmap's.
  740.  */
  741. #define IA32_PAGE_OFFSET        ((current->personality & ADDR_LIMIT_3GB) ? \
  742.                                         0xc0000000 : 0xFFFFe000)
  743.  
  744. #define TASK_SIZE               (test_thread_flag(TIF_ADDR32) ? \
  745.                                         IA32_PAGE_OFFSET : TASK_SIZE_MAX)
  746. #define TASK_SIZE_OF(child)     ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
  747.                                         IA32_PAGE_OFFSET : TASK_SIZE_MAX)
  748.  
  749. #define STACK_TOP               TASK_SIZE
  750. #define STACK_TOP_MAX           TASK_SIZE_MAX
  751.  
  752. #define INIT_THREAD  { \
  753.         .sp0 = TOP_OF_INIT_STACK \
  754. }
  755.  
  756. /*
  757.  * Return the saved PC of a blocked thread.
  758.  * What is this good for? It will always be the scheduler or ret_from_fork.
  759.  */
  760. #define thread_saved_pc(t)      (*(unsigned long *)((t)->thread.sp - 8))
  761.  
  762. #define task_pt_regs(tsk)       ((struct pt_regs *)(tsk)->thread.sp0 - 1)
  763. extern unsigned long KSTK_ESP(struct task_struct *task);
  764.  
  765. #endif /* CONFIG_X86_64 */
  766.  
  767. extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
  768.                                                unsigned long new_sp);
  769.  
  770. /*
  771.  * This decides where the kernel will search for a free chunk of vm
  772.  * space during mmap's.
  773.  */
  774. #define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 3))
  775.  
  776. #define KSTK_EIP(task)          (task_pt_regs(task)->ip)
  777.  
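A sketch of how these accessors are commonly combined (hypothetical helper): procfs-style code reports a task's user-mode instruction and stack pointers this way.

/* Sketch: report where a (stopped) task was executing in user space. */
static inline void report_user_context(struct task_struct *task,
                                       unsigned long *ip, unsigned long *sp)
{
        *ip = KSTK_EIP(task);
        *sp = KSTK_ESP(task);
}
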
  778. /* Get/set a process' ability to use the timestamp counter instruction */
  779. #define GET_TSC_CTL(adr)        get_tsc_mode((adr))
  780. #define SET_TSC_CTL(val)        set_tsc_mode((val))
  781.  
  782. extern int get_tsc_mode(unsigned long adr);
  783. extern int set_tsc_mode(unsigned int val);
  784.  
  785. /* Register/unregister a process' MPX related resource */
  786. #define MPX_ENABLE_MANAGEMENT() mpx_enable_management()
  787. #define MPX_DISABLE_MANAGEMENT()        mpx_disable_management()
  788.  
  789. #ifdef CONFIG_X86_INTEL_MPX
  790. extern int mpx_enable_management(void);
  791. extern int mpx_disable_management(void);
  792. #else
  793. static inline int mpx_enable_management(void)
  794. {
  795.         return -EINVAL;
  796. }
  797. static inline int mpx_disable_management(void)
  798. {
  799.         return -EINVAL;
  800. }
  801. #endif /* CONFIG_X86_INTEL_MPX */
  802.  
  803. extern u16 amd_get_nb_id(int cpu);
  804. extern u32 amd_get_nodes_per_socket(void);
  805.  
  806. static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
  807. {
  808.         uint32_t base, eax, signature[3];
  809.  
  810.         for (base = 0x40000000; base < 0x40010000; base += 0x100) {
  811.                 cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);
  812.  
  813.                 if (!memcmp(sig, signature, 12) &&
  814.                     (leaves == 0 || ((eax - base) >= leaves)))
  815.                         return base;
  816.         }
  817.  
  818.         return 0;
  819. }
  820.  
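For example (hypothetical helper): a guest can probe for a particular hypervisor by matching its 12-byte CPUID signature in the 0x40000000 range scanned above; KVM advertises "KVMKVMKVM\0\0\0", and passing leaves == 0 means any number of leaves is acceptable.

/* Sketch: non-zero iff the guest appears to be running on a KVM hypervisor. */
static inline uint32_t kvm_signature_base(void)
{
        return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
}
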
  821. extern unsigned long arch_align_stack(unsigned long sp);
  822. extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
  823.  
  824. void default_idle(void);
  825. #ifdef  CONFIG_XEN
  826. bool xen_set_default_idle(void);
  827. #else
  828. #define xen_set_default_idle 0
  829. #endif
  830.  
  831. void stop_this_cpu(void *dummy);
  832. void df_debug(struct pt_regs *regs, long error_code);
  833. #endif /* _ASM_X86_PROCESSOR_H */
  834.